From 28c02a7164e5baeb65f69233ea307cf06a70e01b Mon Sep 17 00:00:00 2001
From: Dieter Plaetinck
Date: Thu, 30 Nov 2017 15:05:42 -0500
Subject: [PATCH] switch govendor->dep

note: `dep ensure` adds a bunch of stuff that govendor used to filter out:

* test files
* main files
* non-Go files
* root and sub packages when we only need 1 specific package in a tree
* other dependencies of root/sub packages that we didn't need in the first place

but I then ran `dep prune`, which cleans much of it up again.

vendor updates:

package                           used by                      comments
vendor/github.com/golang/snappy   cmd/mt-replicator-via-tsdb
google/go-querystring             MT clustering                minor
github.com/hailocab/go-hostpool   cassandra
github.com/uber/jaeger-client-go  tracing                      minor
gopkg.in/raintank/schema.v0       everything                   minor v1.5
---
 Gopkg.lock | 454 +
 Gopkg.toml | 164 +
 docs/development.md | 2 +-
 scripts/vendor_health.sh | 21 +-
 .../github.com/DataDog/datadog-go/.travis.yml | 8 +
 .../DataDog/datadog-go/CHANGELOG.md | 54 +
 .../github.com/DataDog/datadog-go/LICENSE.txt | 19 +
 .../github.com/DataDog/datadog-go/README.md | 33 +
 .../DataDog/datadog-go/statsd/README.md | 52 +
 .../DataDog/datadog-go/statsd/statsd.go | 580 ++
 .../DataDog/datadog-go/statsd/statsd_test.go | 620 ++
 .../Dieterbe/artisanalhistogram/README.md | 102 +
 .../artisanalhistogram/hist15s/hist15s.go | 2 +-
 .../hist15s/hist15s_test.go | 212 +
 .../Dieterbe/profiletrigger/README.md | 13 +
 .../github.com/Microsoft/go-winio/.gitignore | 1 +
 vendor/github.com/Microsoft/go-winio/LICENSE | 22 +
 .../github.com/Microsoft/go-winio/README.md | 22 +
 .../github.com/Microsoft/go-winio/backup.go | 280 +
 .../Microsoft/go-winio/backup_test.go | 255 +
 vendor/github.com/Microsoft/go-winio/ea.go | 137 +
 .../github.com/Microsoft/go-winio/ea_test.go | 89 +
 vendor/github.com/Microsoft/go-winio/file.go | 310 +
 .../github.com/Microsoft/go-winio/fileinfo.go | 60 +
 vendor/github.com/Microsoft/go-winio/pipe.go | 404 +
 .../Microsoft/go-winio/pipe_test.go | 424 +
 .../Microsoft/go-winio/privilege.go | 202 +
 .../Microsoft/go-winio/privileges_test.go | 17 +
 .../github.com/Microsoft/go-winio/reparse.go | 128 +
 vendor/github.com/Microsoft/go-winio/sd.go | 98 +
 .../github.com/Microsoft/go-winio/sd_test.go | 26 +
 .../github.com/Microsoft/go-winio/syscall.go | 3 +
 .../Microsoft/go-winio/zsyscall_windows.go | 528 ++
 vendor/github.com/Shopify/sarama/.gitignore | 24 +
 vendor/github.com/Shopify/sarama/.travis.yml | 33 +
 .../sarama/api_versions_request_test.go | 14 +
 .../sarama/api_versions_response_test.go | 32 +
 .../Shopify/sarama/async_producer_test.go | 801 ++
 .../github.com/Shopify/sarama/broker_test.go | 253 +
 .../github.com/Shopify/sarama/client_test.go | 608 ++
 .../github.com/Shopify/sarama/config_test.go | 26 +
 .../sarama/consumer_group_members_test.go | 73 +
 .../sarama/consumer_metadata_request_test.go | 19 +
 .../sarama/consumer_metadata_response_test.go | 35 +
 .../Shopify/sarama/consumer_test.go | 854 ++
 .../sarama/describe_groups_request_test.go | 34 +
 .../sarama/describe_groups_response_test.go | 91 +
 .../Shopify/sarama/fetch_request_test.go | 34 +
 .../Shopify/sarama/fetch_response_test.go | 84 +
 .../Shopify/sarama/functional_client_test.go | 90 +
 .../sarama/functional_consumer_test.go | 61 +
 .../sarama/functional_offset_manager_test.go | 47 +
 .../sarama/functional_producer_test.go | 203 +
 .../Shopify/sarama/functional_test.go | 148 +
 .../Shopify/sarama/heartbeat_request_test.go | 21 +
 .../Shopify/sarama/heartbeat_response_test.go | 18 +
 .../Shopify/sarama/join_group_request_test.go | 41 +
.../sarama/join_group_response_test.go | 98 + .../sarama/leave_group_request_test.go | 19 + .../sarama/leave_group_response_test.go | 24 + .../sarama/list_groups_request_test.go | 7 + .../sarama/list_groups_response_test.go | 58 + .../github.com/Shopify/sarama/message_test.go | 113 + .../Shopify/sarama/metadata_request_test.go | 29 + .../Shopify/sarama/metadata_response_test.go | 139 + .../sarama/offset_commit_request_test.go | 90 + .../sarama/offset_commit_response_test.go | 24 + .../sarama/offset_fetch_request_test.go | 31 + .../sarama/offset_fetch_response_test.go | 22 + .../Shopify/sarama/offset_manager_test.go | 369 + .../Shopify/sarama/offset_request_test.go | 26 + .../Shopify/sarama/offset_response_test.go | 62 + .../Shopify/sarama/partitioner_test.go | 215 + .../Shopify/sarama/produce_request_test.go | 47 + .../Shopify/sarama/produce_response_test.go | 67 + .../Shopify/sarama/produce_set_test.go | 143 + .../github.com/Shopify/sarama/request_test.go | 87 + .../Shopify/sarama/response_header_test.go | 21 + .../sarama/sasl_handshake_request_test.go | 17 + .../sarama/sasl_handshake_response_test.go | 24 + .../Shopify/sarama/sync_group_request_test.go | 38 + .../sarama/sync_group_response_test.go | 40 + .../Shopify/sarama/sync_producer_test.go | 196 + .../github.com/Shopify/sarama/utils_test.go | 21 + vendor/github.com/Sirupsen/logrus/.gitignore | 1 + vendor/github.com/Sirupsen/logrus/.travis.yml | 8 + .../github.com/Sirupsen/logrus/entry_test.go | 53 + .../Sirupsen/logrus/formatter_bench_test.go | 88 + .../github.com/Sirupsen/logrus/hook_test.go | 122 + .../Sirupsen/logrus/json_formatter_test.go | 120 + .../github.com/Sirupsen/logrus/logrus_test.go | 301 + .../Sirupsen/logrus/text_formatter_test.go | 61 + vendor/github.com/Unknwon/com/.gitignore | 24 + vendor/github.com/Unknwon/com/.travis.yml | 13 + vendor/github.com/Unknwon/com/cmd_test.go | 140 + vendor/github.com/Unknwon/com/convert_test.go | 56 + vendor/github.com/Unknwon/com/dir_test.go | 56 + vendor/github.com/Unknwon/com/example_test.go | 299 + vendor/github.com/Unknwon/com/file_test.go | 61 + vendor/github.com/Unknwon/com/html_test.go | 35 + vendor/github.com/Unknwon/com/http_test.go | 111 + vendor/github.com/Unknwon/com/math_test.go | 44 + vendor/github.com/Unknwon/com/path_test.go | 67 + vendor/github.com/Unknwon/com/regex_test.go | 70 + vendor/github.com/Unknwon/com/slice_test.go | 99 + vendor/github.com/Unknwon/com/string_test.go | 108 + .../alyu/configparser/configparser_test.go | 483 + .../alyu/configparser/example_test.go | 73 + vendor/github.com/apache/thrift/.clang-format | 56 + vendor/github.com/apache/thrift/.dockerignore | 1 + vendor/github.com/apache/thrift/.editorconfig | 112 + .../github.com/apache/thrift/.gitattributes | 1 + vendor/github.com/apache/thrift/.gitignore | 326 + vendor/github.com/apache/thrift/.travis.yml | 199 + vendor/github.com/apache/thrift/CHANGES | 2366 +++++ .../github.com/apache/thrift/CMakeLists.txt | 117 + .../github.com/apache/thrift/CONTRIBUTING.md | 49 + vendor/github.com/apache/thrift/Dockerfile | 61 + vendor/github.com/apache/thrift/Makefile.am | 131 + vendor/github.com/apache/thrift/README.md | 166 + .../github.com/apache/thrift/Thrift.podspec | 18 + vendor/github.com/apache/thrift/appveyor.yml | 93 + vendor/github.com/apache/thrift/bootstrap.sh | 54 + vendor/github.com/apache/thrift/bower.json | 16 + vendor/github.com/apache/thrift/cleanup.sh | 89 + vendor/github.com/apache/thrift/composer.json | 30 + vendor/github.com/apache/thrift/configure.ac | 959 ++ 
vendor/github.com/apache/thrift/doap.rdf | 132 + .../github.com/apache/thrift/lib/Makefile.am | 109 + .../apache/thrift/lib/go/Makefile.am | 42 + .../github.com/apache/thrift/lib/go/README.md | 81 + .../apache/thrift/lib/go/coding_standards.md | 1 + .../go/thrift/application_exception_test.go | 41 + .../lib/go/thrift/binary_protocol_test.go | 28 + .../lib/go/thrift/buffered_transport_test.go | 29 + .../lib/go/thrift/compact_protocol_test.go | 53 + .../thrift/lib/go/thrift/exception_test.go | 69 + .../lib/go/thrift/framed_transport_test.go | 29 + .../thrift/lib/go/thrift/http_client_test.go | 106 + .../lib/go/thrift/iostream_transport_test.go | 52 + .../lib/go/thrift/json_protocol_test.go | 649 ++ .../lib/go/thrift/lowlevel_benchmarks_test.go | 396 + .../lib/go/thrift/memory_buffer_test.go | 29 + .../thrift/lib/go/thrift/protocol_test.go | 479 + .../lib/go/thrift/rich_transport_test.go | 85 + .../thrift/lib/go/thrift/serializer_test.go | 169 + .../lib/go/thrift/serializer_types_test.go | 633 ++ .../lib/go/thrift/server_socket_test.go | 50 + .../thrift/lib/go/thrift/server_test.go | 28 + .../go/thrift/simple_json_protocol_test.go | 715 ++ .../lib/go/thrift/transport_exception_test.go | 60 + .../thrift/lib/go/thrift/transport_test.go | 176 + .../lib/go/thrift/zlib_transport_test.go | 33 + vendor/github.com/apache/thrift/package.json | 54 + .../apache/thrift/sonar-project.properties | 140 + vendor/github.com/araddon/gou/.gitignore | 22 + vendor/github.com/araddon/gou/coerce_test.go | 25 + .../github.com/araddon/gou/jsonhelper_test.go | 179 + vendor/github.com/araddon/gou/uid_test.go | 11 + vendor/github.com/armon/go-metrics/.gitignore | 22 + .../armon/go-metrics/inmem_signal_test.go | 46 + .../github.com/armon/go-metrics/inmem_test.go | 104 + .../armon/go-metrics/metrics_test.go | 262 + .../github.com/armon/go-metrics/sink_test.go | 120 + .../github.com/armon/go-metrics/start_test.go | 110 + .../armon/go-metrics/statsd_test.go | 105 + .../armon/go-metrics/statsite_test.go | 101 + .../github.com/bitly/go-hostpool/.gitignore | 22 + .../github.com/bitly/go-hostpool/.travis.yml | 0 .../bitly/go-hostpool/example_test.go | 13 + .../bitly/go-hostpool/hostpool_test.go | 145 + .../github.com/bsm/sarama-cluster/.gitignore | 4 + .../github.com/bsm/sarama-cluster/.travis.yml | 14 + .../bsm/sarama-cluster/balancer_test.go | 124 + .../bsm/sarama-cluster/cluster_test.go | 192 + .../bsm/sarama-cluster/config_test.go | 25 + .../bsm/sarama-cluster/consumer_test.go | 208 + .../bsm/sarama-cluster/partitions_test.go | 132 + .../codahale/hdrhistogram/.travis.yml | 5 + .../codahale/hdrhistogram/hdr_test.go | 388 + .../codahale/hdrhistogram/window_test.go | 64 + vendor/github.com/codeskyblue/go-uuid/dce.go | 84 - vendor/github.com/codeskyblue/go-uuid/doc.go | 8 - vendor/github.com/codeskyblue/go-uuid/hash.go | 53 - vendor/github.com/codeskyblue/go-uuid/node.go | 101 - vendor/github.com/codeskyblue/go-uuid/time.go | 112 - vendor/github.com/codeskyblue/go-uuid/util.go | 43 - vendor/github.com/codeskyblue/go-uuid/uuid.go | 163 - .../codeskyblue/go-uuid/version1.go | 41 - .../codeskyblue/go-uuid/version4.go | 25 - vendor/github.com/davecgh/go-spew/.gitignore | 22 + vendor/github.com/davecgh/go-spew/.travis.yml | 14 + vendor/github.com/davecgh/go-spew/README.md | 205 + .../github.com/davecgh/go-spew/cov_report.sh | 22 + .../davecgh/go-spew/spew/common_test.go | 298 + .../davecgh/go-spew/spew/dump_test.go | 1042 +++ .../davecgh/go-spew/spew/dumpcgo_test.go | 99 + .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 + 
.../davecgh/go-spew/spew/example_test.go | 226 + .../davecgh/go-spew/spew/format_test.go | 1558 ++++ .../davecgh/go-spew/spew/internal_test.go | 87 + .../go-spew/spew/internalunsafe_test.go | 102 + .../davecgh/go-spew/spew/spew_test.go | 320 + .../davecgh/go-spew/test_coverage.txt | 61 + .../github.com/dgryski/go-bits/bits_test.go | 50 + .../dgryski/go-linlog/linlog_test.go | 146 + vendor/github.com/dgryski/go-tsz/.gitignore | 1 + vendor/github.com/dgryski/go-tsz/tsz_test.go | 275 + .../github.com/docker/distribution/.gitignore | 37 + .../github.com/docker/distribution/.mailmap | 19 + vendor/github.com/docker/distribution/AUTHORS | 182 + .../docker/distribution/BUILDING.md | 119 + .../docker/distribution/CHANGELOG.md | 114 + .../docker/distribution/CONTRIBUTING.md | 140 + .../github.com/docker/distribution/Dockerfile | 18 + vendor/github.com/docker/distribution/LICENSE | 202 + .../docker/distribution/MAINTAINERS | 58 + .../github.com/docker/distribution/Makefile | 109 + .../github.com/docker/distribution/README.md | 131 + .../docker/distribution/RELEASE-CHECKLIST.md | 36 + .../github.com/docker/distribution/ROADMAP.md | 267 + .../github.com/docker/distribution/blobs.go | 257 + .../github.com/docker/distribution/circle.yml | 93 + .../docker/distribution/coverpkg.sh | 7 + .../docker/distribution/digest/digest.go | 139 + .../docker/distribution/digest/digest_test.go | 82 + .../docker/distribution/digest/digester.go | 155 + .../digest/digester_resumable_test.go | 21 + .../docker/distribution/digest/doc.go | 42 + .../docker/distribution/digest/set.go | 245 + .../docker/distribution/digest/set_test.go | 368 + .../docker/distribution/digest/verifiers.go | 44 + .../distribution/digest/verifiers_test.go | 49 + vendor/github.com/docker/distribution/doc.go | 7 + .../github.com/docker/distribution/errors.go | 115 + .../docker/distribution/manifests.go | 125 + .../distribution/reference/reference.go | 370 + .../distribution/reference/reference_test.go | 661 ++ .../docker/distribution/reference/regexp.go | 124 + .../distribution/reference/regexp_test.go | 489 ++ .../docker/distribution/registry.go | 97 + vendor/github.com/docker/distribution/tags.go | 27 + vendor/github.com/docker/docker/.dockerignore | 4 + vendor/github.com/docker/docker/.gitignore | 33 + vendor/github.com/docker/docker/.mailmap | 275 + vendor/github.com/docker/docker/AUTHORS | 1652 ++++ vendor/github.com/docker/docker/CHANGELOG.md | 3337 +++++++ .../github.com/docker/docker/CONTRIBUTING.md | 401 + vendor/github.com/docker/docker/Dockerfile | 246 + .../docker/docker/Dockerfile.aarch64 | 175 + .../github.com/docker/docker/Dockerfile.armhf | 182 + .../docker/docker/Dockerfile.ppc64le | 188 + .../github.com/docker/docker/Dockerfile.s390x | 190 + .../docker/docker/Dockerfile.simple | 73 + .../docker/docker/Dockerfile.solaris | 20 + .../docker/docker/Dockerfile.windows | 267 + vendor/github.com/docker/docker/LICENSE | 2 +- vendor/github.com/docker/docker/MAINTAINERS | 376 + vendor/github.com/docker/docker/Makefile | 147 + vendor/github.com/docker/docker/NOTICE | 2 +- vendor/github.com/docker/docker/README.md | 304 + vendor/github.com/docker/docker/ROADMAP.md | 118 + vendor/github.com/docker/docker/VENDORING.md | 45 + vendor/github.com/docker/docker/VERSION | 1 + vendor/github.com/docker/docker/api/README.md | 42 + vendor/github.com/docker/docker/api/common.go | 166 + .../docker/docker/api/common_test.go | 341 + .../docker/docker/api/common_unix.go | 6 + .../docker/docker/api/common_windows.go | 8 + .../docker/docker/api/swagger-gen.yaml | 
12 + .../github.com/docker/docker/api/swagger.yaml | 7785 +++++++++++++++++ .../docker/docker/api/types/client.go | 80 +- .../docker/api/types/container/config.go | 11 +- .../api/types/container/container_changes.go | 21 - .../api/types/container/container_create.go | 2 +- .../api/types/container/container_top.go | 21 - .../api/types/container/container_update.go | 2 +- .../api/types/container/container_wait.go | 14 +- .../docker/api/types/container/host_config.go | 119 +- .../api/types/container/hostconfig_unix.go | 40 + .../api/types/container/hostconfig_windows.go | 53 +- .../api/types/container/waitcondition.go | 22 - .../docker/docker/api/types/events/events.go | 10 - .../docker/docker/api/types/filters/parse.go | 272 +- .../docker/api/types/filters/parse_test.go | 417 + .../docker/api/types/graph_driver_data.go | 17 - .../docker/api/types/image/image_history.go | 37 - .../api/types/image_delete_response_item.go | 15 - .../docker/docker/api/types/mount/mount.go | 25 +- .../docker/api/types/network/network.go | 51 +- .../docker/docker/api/types/plugin.go | 13 +- .../docker/api/types/plugin_responses.go | 23 +- .../api/types/reference/image_reference.go | 34 + .../types/reference/image_reference_test.go | 72 + .../docker/api/types/registry/authenticate.go | 2 +- .../docker/api/types/registry/registry.go | 21 +- .../docker/docker/api/types/seccomp.go | 2 +- .../docker/docker/api/types/stats.go | 3 - .../api/types/strslice/strslice_test.go | 86 + .../docker/docker/api/types/swarm/common.go | 17 +- .../docker/docker/api/types/swarm/config.go | 31 - .../docker/api/types/swarm/container.go | 33 +- .../docker/docker/api/types/swarm/network.go | 22 +- .../docker/docker/api/types/swarm/node.go | 1 - .../docker/docker/api/types/swarm/runtime.go | 19 - .../docker/api/types/swarm/runtime/gen.go | 3 - .../api/types/swarm/runtime/plugin.pb.go | 712 -- .../api/types/swarm/runtime/plugin.proto | 18 - .../docker/docker/api/types/swarm/secret.go | 3 +- .../docker/docker/api/types/swarm/service.go | 37 +- .../docker/docker/api/types/swarm/swarm.go | 30 +- .../docker/docker/api/types/swarm/task.go | 66 +- .../api/types/time/duration_convert_test.go | 26 + .../docker/docker/api/types/time/timestamp.go | 8 +- .../docker/api/types/time/timestamp_test.go | 93 + .../docker/docker/api/types/types.go | 150 +- .../docker/api/types/versions/README.md | 4 +- .../docker/api/types/versions/compare_test.go | 26 + .../docker/docker/api/types/volume.go | 17 +- .../docker/api/types/volume/volumes_create.go | 2 +- .../docker/api/types/volume/volumes_list.go | 2 +- vendor/github.com/docker/docker/cli/cobra.go | 139 + vendor/github.com/docker/docker/cli/error.go | 33 + .../github.com/docker/docker/cli/required.go | 96 + .../github.com/docker/docker/client/README.md | 35 + .../docker/docker/client/checkpoint_create.go | 13 + .../docker/client/checkpoint_create_test.go | 73 + .../docker/docker/client/checkpoint_delete.go | 20 + .../docker/client/checkpoint_delete_test.go | 54 + .../docker/docker/client/checkpoint_list.go | 28 + .../docker/client/checkpoint_list_test.go | 57 + .../github.com/docker/docker/client/client.go | 246 + .../docker/docker/client/client_mock_test.go | 45 + .../docker/docker/client/client_test.go | 283 + .../docker/docker/client/client_unix.go | 6 + .../docker/docker/client/client_windows.go | 4 + .../docker/docker/client/container_attach.go | 37 + .../docker/docker/client/container_commit.go | 53 + .../docker/client/container_commit_test.go | 96 + .../docker/docker/client/container_copy.go | 97 + 
.../docker/client/container_copy_test.go | 244 + .../docker/docker/client/container_create.go | 50 + .../docker/client/container_create_test.go | 76 + .../docker/docker/client/container_diff.go | 23 + .../docker/client/container_diff_test.go | 61 + .../docker/docker/client/container_exec.go | 54 + .../docker/client/container_exec_test.go | 157 + .../docker/docker/client/container_export.go | 20 + .../docker/client/container_export_test.go | 50 + .../docker/docker/client/container_inspect.go | 54 + .../docker/client/container_inspect_test.go | 125 + .../docker/docker/client/container_kill.go | 17 + .../docker/client/container_kill_test.go | 46 + .../docker/docker/client/container_list.go | 56 + .../docker/client/container_list_test.go | 96 + .../docker/docker/client/container_logs.go | 52 + .../docker/client/container_logs_test.go | 133 + .../docker/docker/client/container_pause.go | 10 + .../docker/client/container_pause_test.go | 41 + .../docker/docker/client/container_prune.go | 36 + .../docker/docker/client/container_remove.go | 27 + .../docker/client/container_remove_test.go | 59 + .../docker/docker/client/container_rename.go | 16 + .../docker/client/container_rename_test.go | 46 + .../docker/docker/client/container_resize.go | 29 + .../docker/client/container_resize_test.go | 82 + .../docker/docker/client/container_restart.go | 22 + .../docker/client/container_restart_test.go | 48 + .../docker/docker/client/container_start.go | 24 + .../docker/client/container_start_test.go | 58 + .../docker/docker/client/container_stats.go | 26 + .../docker/client/container_stats_test.go | 70 + .../docker/docker/client/container_stop.go | 21 + .../docker/client/container_stop_test.go | 48 + .../docker/docker/client/container_top.go | 28 + .../docker/client/container_top_test.go | 74 + .../docker/docker/client/container_unpause.go | 10 + .../docker/client/container_unpause_test.go | 41 + .../docker/docker/client/container_update.go | 22 + .../docker/client/container_update_test.go | 58 + .../docker/docker/client/container_wait.go | 26 + .../docker/client/container_wait_test.go | 70 + .../docker/docker/client/disk_usage.go | 26 + .../github.com/docker/docker/client/errors.go | 278 + .../github.com/docker/docker/client/events.go | 102 + .../docker/docker/client/events_test.go | 165 + .../github.com/docker/docker/client/hijack.go | 177 + .../docker/docker/client/image_build.go | 123 + .../docker/docker/client/image_build_test.go | 233 + .../docker/docker/client/image_create.go | 34 + .../docker/docker/client/image_create_test.go | 76 + .../docker/docker/client/image_history.go | 22 + .../docker/client/image_history_test.go | 60 + .../docker/docker/client/image_import.go | 37 + .../docker/docker/client/image_import_test.go | 81 + .../docker/docker/client/image_inspect.go | 33 + .../docker/client/image_inspect_test.go | 71 + .../docker/docker/client/image_list.go | 45 + .../docker/docker/client/image_list_test.go | 159 + .../docker/docker/client/image_load.go | 30 + .../docker/docker/client/image_load_test.go | 95 + .../docker/docker/client/image_prune.go | 36 + .../docker/docker/client/image_pull.go | 46 + .../docker/docker/client/image_pull_test.go | 199 + .../docker/docker/client/image_push.go | 54 + .../docker/docker/client/image_push_test.go | 180 + .../docker/docker/client/image_remove.go | 31 + .../docker/docker/client/image_remove_test.go | 95 + .../docker/docker/client/image_save.go | 22 + .../docker/docker/client/image_save_test.go | 58 + .../docker/docker/client/image_search.go | 51 + 
.../docker/docker/client/image_search_test.go | 165 + .../docker/docker/client/image_tag.go | 34 + .../docker/docker/client/image_tag_test.go | 121 + .../github.com/docker/docker/client/info.go | 26 + .../docker/docker/client/info_test.go | 76 + .../docker/docker/client/interface.go | 171 + .../docker/client/interface_experimental.go | 17 + .../docker/docker/client/interface_stable.go | 10 + .../github.com/docker/docker/client/login.go | 29 + .../docker/docker/client/network_connect.go | 18 + .../docker/client/network_connect_test.go | 107 + .../docker/docker/client/network_create.go | 25 + .../docker/client/network_create_test.go | 72 + .../docker/client/network_disconnect.go | 14 + .../docker/client/network_disconnect_test.go | 64 + .../docker/docker/client/network_inspect.go | 38 + .../docker/client/network_inspect_test.go | 69 + .../docker/docker/client/network_list.go | 31 + .../docker/docker/client/network_list_test.go | 108 + .../docker/docker/client/network_prune.go | 36 + .../docker/docker/client/network_remove.go | 10 + .../docker/client/network_remove_test.go | 47 + .../docker/docker/client/node_inspect.go | 33 + .../docker/docker/client/node_inspect_test.go | 65 + .../docker/docker/client/node_list.go | 36 + .../docker/docker/client/node_list_test.go | 94 + .../docker/docker/client/node_remove.go | 21 + .../docker/docker/client/node_remove_test.go | 69 + .../docker/docker/client/node_update.go | 18 + .../docker/docker/client/node_update_test.go | 49 + .../github.com/docker/docker/client/ping.go | 30 + .../docker/docker/client/plugin_create.go | 26 + .../docker/docker/client/plugin_disable.go | 19 + .../docker/client/plugin_disable_test.go | 48 + .../docker/docker/client/plugin_enable.go | 19 + .../docker/client/plugin_enable_test.go | 48 + .../docker/docker/client/plugin_inspect.go | 32 + .../docker/client/plugin_inspect_test.go | 54 + .../docker/docker/client/plugin_install.go | 113 + .../docker/docker/client/plugin_list.go | 21 + .../docker/docker/client/plugin_list_test.go | 59 + .../docker/docker/client/plugin_push.go | 17 + .../docker/docker/client/plugin_push_test.go | 51 + .../docker/docker/client/plugin_remove.go | 20 + .../docker/client/plugin_remove_test.go | 49 + .../docker/docker/client/plugin_set.go | 12 + .../docker/docker/client/plugin_set_test.go | 47 + .../docker/docker/client/plugin_upgrade.go | 37 + .../docker/docker/client/request.go | 247 + .../docker/docker/client/request_test.go | 92 + .../docker/docker/client/secret_create.go | 24 + .../docker/client/secret_create_test.go | 57 + .../docker/docker/client/secret_inspect.go | 34 + .../docker/client/secret_inspect_test.go | 65 + .../docker/docker/client/secret_list.go | 35 + .../docker/docker/client/secret_list_test.go | 94 + .../docker/docker/client/secret_remove.go | 10 + .../docker/client/secret_remove_test.go | 47 + .../docker/docker/client/secret_update.go | 19 + .../docker/client/secret_update_test.go | 49 + .../docker/docker/client/service_create.go | 30 + .../docker/client/service_create_test.go | 57 + .../docker/docker/client/service_inspect.go | 33 + .../docker/client/service_inspect_test.go | 65 + .../docker/docker/client/service_list.go | 35 + .../docker/docker/client/service_list_test.go | 94 + .../docker/docker/client/service_logs.go | 52 + .../docker/docker/client/service_logs_test.go | 133 + .../docker/docker/client/service_remove.go | 10 + .../docker/client/service_remove_test.go | 47 + .../docker/docker/client/service_update.go | 41 + .../docker/client/service_update_test.go | 77 + 
.../docker/client/swarm_get_unlock_key.go | 21 + .../docker/docker/client/swarm_init.go | 21 + .../docker/docker/client/swarm_init_test.go | 54 + .../docker/docker/client/swarm_inspect.go | 21 + .../docker/client/swarm_inspect_test.go | 56 + .../docker/docker/client/swarm_join.go | 13 + .../docker/docker/client/swarm_join_test.go | 51 + .../docker/docker/client/swarm_leave.go | 18 + .../docker/docker/client/swarm_leave_test.go | 66 + .../docker/docker/client/swarm_unlock.go | 17 + .../docker/docker/client/swarm_update.go | 22 + .../docker/docker/client/swarm_update_test.go | 49 + .../docker/docker/client/task_inspect.go | 34 + .../docker/docker/client/task_inspect_test.go | 54 + .../docker/docker/client/task_list.go | 35 + .../docker/docker/client/task_list_test.go | 94 + .../docker/docker/client/transport.go | 28 + .../github.com/docker/docker/client/utils.go | 33 + .../docker/docker/client/version.go | 21 + .../docker/docker/client/volume_create.go | 21 + .../docker/client/volume_create_test.go | 75 + .../docker/docker/client/volume_inspect.go | 38 + .../docker/client/volume_inspect_test.go | 76 + .../docker/docker/client/volume_list.go | 32 + .../docker/docker/client/volume_list_test.go | 98 + .../docker/docker/client/volume_prune.go | 36 + .../docker/docker/client/volume_remove.go | 21 + .../docker/client/volume_remove_test.go | 47 + vendor/github.com/docker/docker/pkg/README.md | 11 + .../docker/pkg/tlsconfig/tlsconfig_clone.go | 11 + .../pkg/tlsconfig/tlsconfig_clone_go16.go | 31 + .../pkg/tlsconfig/tlsconfig_clone_go17.go | 33 + vendor/github.com/docker/docker/poule.yml | 88 + vendor/github.com/docker/docker/vendor.conf | 140 + .../docker/go-connections/CONTRIBUTING.md | 55 + .../github.com/docker/go-connections/LICENSE | 191 + .../docker/go-connections/MAINTAINERS | 27 + .../docker/go-connections/README.md | 13 + .../docker/go-connections/circle.yml | 14 + .../github.com/docker/go-connections/doc.go | 3 + .../docker/go-connections/nat/nat.go | 242 + .../docker/go-connections/nat/nat_test.go | 583 ++ .../docker/go-connections/nat/parse.go | 57 + .../docker/go-connections/nat/parse_test.go | 54 + .../docker/go-connections/nat/sort.go | 96 + .../docker/go-connections/nat/sort_test.go | 85 + .../docker/go-connections/sockets/README.md | 0 .../go-connections/sockets/inmem_socket.go | 81 + .../sockets/inmem_socket_test.go | 39 + .../docker/go-connections/sockets/proxy.go | 51 + .../docker/go-connections/sockets/sockets.go | 38 + .../go-connections/sockets/sockets_unix.go | 35 + .../go-connections/sockets/sockets_windows.go | 27 + .../go-connections/sockets/tcp_socket.go | 22 + .../go-connections/sockets/unix_socket.go | 32 + .../go-connections/tlsconfig/certpool_go17.go | 18 + .../tlsconfig/certpool_other.go | 14 + .../docker/go-connections/tlsconfig/config.go | 244 + .../tlsconfig/config_client_ciphers.go | 17 + .../tlsconfig/config_legacy_client_ciphers.go | 15 + .../go-connections/tlsconfig/config_test.go | 651 ++ .../docker/go-units/CONTRIBUTING.md | 67 + vendor/github.com/docker/go-units/LICENSE | 191 + vendor/github.com/docker/go-units/MAINTAINERS | 27 + vendor/github.com/docker/go-units/README.md | 16 + vendor/github.com/docker/go-units/circle.yml | 11 + vendor/github.com/docker/go-units/duration.go | 35 + .../docker/go-units/duration_test.go | 95 + vendor/github.com/docker/go-units/size.go | 108 + .../github.com/docker/go-units/size_test.go | 165 + vendor/github.com/docker/go-units/ulimit.go | 118 + .../github.com/docker/go-units/ulimit_test.go | 131 + 
.../eapache/go-resiliency/.gitignore | 24 + .../eapache/go-resiliency/.travis.yml | 7 + .../eapache/go-resiliency/README.md | 21 + .../go-resiliency/breaker/breaker_test.go | 196 + .../eapache/go-xerial-snappy/.gitignore | 24 + .../eapache/go-xerial-snappy/.travis.yml | 7 + .../eapache/go-xerial-snappy/snappy_test.go | 49 + vendor/github.com/eapache/queue/.gitignore | 23 + vendor/github.com/eapache/queue/.travis.yml | 7 + vendor/github.com/eapache/queue/queue_test.go | 162 + .../github.com/go-macaron/binding/.gitignore | 1 + .../github.com/go-macaron/binding/.travis.yml | 15 + .../go-macaron/binding/bind_test.go | 57 + .../go-macaron/binding/common_test.go | 127 + .../go-macaron/binding/errorhandler_test.go | 162 + .../go-macaron/binding/errors_test.go | 115 + .../go-macaron/binding/file_test.go | 191 + .../go-macaron/binding/form_test.go | 282 + .../go-macaron/binding/json_test.go | 240 + .../go-macaron/binding/misc_test.go | 123 + .../go-macaron/binding/multipart_test.go | 155 + .../go-macaron/binding/validate_test.go | 412 + .../github.com/go-macaron/inject/.travis.yml | 14 + .../go-macaron/inject/inject_test.go | 285 + vendor/github.com/gocql/gocql/.gitignore | 5 + vendor/github.com/gocql/gocql/.travis.yml | 45 + .../gocql/gocql/address_translators_test.go | 34 + vendor/github.com/gocql/gocql/batch_test.go | 58 + .../github.com/gocql/gocql/cass1batch_test.go | 60 + .../github.com/gocql/gocql/cassandra_test.go | 2697 ++++++ vendor/github.com/gocql/gocql/cluster_test.go | 53 + vendor/github.com/gocql/gocql/common_test.go | 196 + .../github.com/gocql/gocql/compressor_test.go | 40 + vendor/github.com/gocql/gocql/conn_test.go | 935 ++ vendor/github.com/gocql/gocql/control_test.go | 66 + vendor/github.com/gocql/gocql/errors_test.go | 29 + .../github.com/gocql/gocql/events_ccm_test.go | 297 + vendor/github.com/gocql/gocql/events_test.go | 33 + vendor/github.com/gocql/gocql/filters_test.go | 93 + vendor/github.com/gocql/gocql/frame_test.go | 106 + .../gocql/gocql/framer_bench_test.go | 48 + .../gocql/gocql/host_source_test.go | 137 + .../gocql/gocql/internal/lru/lru.go | 2 +- .../gocql/gocql/internal/lru/lru_test.go | 72 + .../gocql/internal/murmur/murmur_test.go | 74 + .../gocql/internal/streams/streams_test.go | 201 + vendor/github.com/gocql/gocql/marshal_test.go | 1403 +++ .../github.com/gocql/gocql/metadata_test.go | 815 ++ .../github.com/gocql/gocql/policies_test.go | 320 + vendor/github.com/gocql/gocql/ring_test.go | 38 + .../gocql/gocql/session_connect_test.go | 131 + vendor/github.com/gocql/gocql/session_test.go | 252 + vendor/github.com/gocql/gocql/stress_test.go | 70 + vendor/github.com/gocql/gocql/token_test.go | 335 + .../github.com/gocql/gocql/topology_test.go | 51 + vendor/github.com/gocql/gocql/tuple_test.go | 127 + vendor/github.com/gocql/gocql/udt_test.go | 503 ++ vendor/github.com/gocql/gocql/uuid_test.go | 218 + vendor/github.com/gocql/gocql/wiki_test.go | 279 + vendor/github.com/golang/snappy/.gitignore | 16 + vendor/github.com/golang/snappy/AUTHORS | 1 + vendor/github.com/golang/snappy/CONTRIBUTORS | 1 + vendor/github.com/golang/snappy/README | 100 + vendor/github.com/golang/snappy/decode.go | 123 +- .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 ++ .../github.com/golang/snappy/decode_other.go | 101 + vendor/github.com/golang/snappy/encode.go | 363 +- .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 ++ .../github.com/golang/snappy/encode_other.go | 238 + 
.../github.com/golang/snappy/golden_test.go | 1965 +++++ vendor/github.com/golang/snappy/snappy.go | 25 +- .../github.com/golang/snappy/snappy_test.go | 1353 +++ .../google/go-querystring/.gitignore | 1 + .../google/go-querystring/CONTRIBUTING.md | 67 + .../google/go-querystring/README.md | 39 + .../google/go-querystring/query/encode.go | 25 +- .../go-querystring/query/encode_test.go | 328 + .../github.com/gopherjs/gopherjs/.gitignore | 2 + vendor/github.com/gopherjs/gopherjs/README.md | 120 + .../github.com/gopherjs/gopherjs/circle.yml | 21 + .../gopherjs/gopherjs/js/js_test.go | 553 ++ vendor/github.com/gopherjs/gopherjs/tool.go | 791 ++ .../carbon-relay-ng/.gitattributes | 1 + .../graphite-ng/carbon-relay-ng/.gitignore | 9 + .../graphite-ng/carbon-relay-ng/Dockerfile | 8 + .../graphite-ng/carbon-relay-ng/LICENSE | 29 + .../graphite-ng/carbon-relay-ng/Makefile | 140 + .../graphite-ng/carbon-relay-ng/README.md | 280 + .../graphite-ng/carbon-relay-ng/circle.yml | 50 + .../carbon-relay-ng/clock/clock.go | 26 + .../carbon-relay-ng/deploy-docker.sh | 9 + .../carbon-relay-ng/grafana-dashboard.json | 1008 +++ .../hailocab/go-hostpool/.gitignore | 22 + .../hailocab/go-hostpool/.travis.yml | 0 .../hailocab/go-hostpool/epsilon_greedy.go | 24 +- .../hailocab/go-hostpool/example_test.go | 13 + .../hailocab/go-hostpool/hostpool.go | 32 +- .../hailocab/go-hostpool/hostpool_test.go | 145 + .../hashicorp/errwrap/errwrap_test.go | 94 + .../github.com/hashicorp/go-msgpack/README.md | 14 + .../hashicorp/go-msgpack/codec/bench_test.go | 319 + .../hashicorp/go-msgpack/codec/codecs_test.go | 1002 +++ .../go-msgpack/codec/ext_dep_test.go | 75 + .../go-msgpack/codec/z_helper_test.go | 103 + .../hashicorp/go-msgpack/msgpack.org.md | 47 + .../hashicorp/go-multierror/append_test.go | 64 + .../hashicorp/go-multierror/flatten_test.go | 48 + .../hashicorp/go-multierror/format_test.go | 23 + .../go-multierror/multierror_test.go | 70 + .../hashicorp/go-multierror/prefix_test.go | 33 + .../hashicorp/go-sockaddr/.gitignore | 26 + .../hashicorp/go-sockaddr/ifaddr_test.go | 594 ++ .../hashicorp/go-sockaddr/ifaddrs_test.go | 1987 +++++ .../hashicorp/go-sockaddr/ifattr_test.go | 112 + .../hashicorp/go-sockaddr/ipaddr_test.go | 222 + .../hashicorp/go-sockaddr/ipaddrs_test.go | 460 + .../hashicorp/go-sockaddr/ipv4addr_test.go | 991 +++ .../hashicorp/go-sockaddr/ipv6addr_test.go | 725 ++ .../hashicorp/go-sockaddr/rfc_test.go | 63 + .../hashicorp/go-sockaddr/route_info_test.go | 196 + .../hashicorp/go-sockaddr/sockaddr_test.go | 440 + .../hashicorp/go-sockaddr/sockaddrs_test.go | 338 + .../hashicorp/go-sockaddr/unixsock_test.go | 108 + .../hashicorp/memberlist/.gitignore | 25 + .../hashicorp/memberlist/awareness_test.go | 41 + .../hashicorp/memberlist/broadcast_test.go | 27 + .../hashicorp/memberlist/integ_test.go | 89 + .../hashicorp/memberlist/keyring_test.go | 154 + .../hashicorp/memberlist/logging_test.go | 47 + .../hashicorp/memberlist/memberlist_test.go | 1545 ++++ .../hashicorp/memberlist/net_test.go | 814 ++ .../hashicorp/memberlist/queue_test.go | 172 + .../hashicorp/memberlist/security_test.go | 70 + .../hashicorp/memberlist/state_test.go | 1900 ++++ .../hashicorp/memberlist/suspicion_test.go | 198 + .../hashicorp/memberlist/transport_test.go | 124 + .../hashicorp/memberlist/util_test.go | 358 + .../jpillora/backoff/backoff_test.go | 126 + vendor/github.com/jtolds/gls/context_test.go | 139 + vendor/github.com/kisielk/og-rek/.gitignore | 1 + vendor/github.com/kisielk/og-rek/.travis.yml | 7 + 
.../github.com/kisielk/og-rek/encode_test.go | 90 + .../github.com/kisielk/og-rek/ogorek_test.go | 376 + .../whisper-go/whisper/whisper_test.go | 335 + .../github.com/klauspost/compress/.gitignore | 24 + .../github.com/klauspost/compress/.travis.yml | 24 + .../github.com/klauspost/compress/README.md | 290 + .../klauspost/compress/flate/asm_test.go | 193 + .../klauspost/compress/flate/copy_test.go | 54 + .../klauspost/compress/flate/deflate_test.go | 648 ++ .../compress/flate/dict_decoder_test.go | 139 + .../klauspost/compress/flate/flate_test.go | 260 + .../compress/flate/huffman_bit_writer_test.go | 366 + .../klauspost/compress/flate/inflate_test.go | 282 + .../klauspost/compress/flate/reader_test.go | 97 + .../klauspost/compress/flate/writer_test.go | 258 + .../klauspost/compress/gzip/example_test.go | 128 + .../klauspost/compress/gzip/gunzip_test.go | 682 ++ .../klauspost/compress/gzip/gzip_test.go | 519 ++ vendor/github.com/klauspost/cpuid/.gitignore | 24 + vendor/github.com/klauspost/cpuid/.travis.yml | 8 + .../github.com/klauspost/cpuid/cpuid_test.go | 727 ++ .../klauspost/cpuid/mockcpu_test.go | 209 + vendor/github.com/klauspost/crc32/.gitignore | 24 + vendor/github.com/klauspost/crc32/.travis.yml | 11 + .../github.com/klauspost/crc32/crc32_test.go | 170 + .../klauspost/crc32/example_test.go | 28 + .../github.com/mattbaird/elastigo/.drone.yml | 10 + .../github.com/mattbaird/elastigo/.gitignore | 31 + .../github.com/mattbaird/elastigo/.gitmodules | 12 + .../github.com/mattbaird/elastigo/.travis.yml | 13 + .../github.com/mattbaird/elastigo/HACKING.md | 21 + .../github.com/mattbaird/elastigo/README.md | 249 + .../github.com/mattbaird/elastigo/Vagrantfile | 29 + .../github.com/mattbaird/elastigo/client.go | 85 + vendor/github.com/mattbaird/elastigo/doc.go | 15 + .../elastigo/lib/cataliasinfo_test.go | 26 + .../elastigo/lib/catindexinfo_test.go | 117 + .../elastigo/lib/catnodeinfo_test.go | 58 + .../elastigo/lib/catshardinfo_test.go | 85 + .../elastigo/lib/clusternodesinfo_test.go | 37 + .../mattbaird/elastigo/lib/connection_test.go | 62 + .../mattbaird/elastigo/lib/corebulk_test.go | 399 + .../elastigo/lib/coreexample_test.go | 52 + .../elastigo/lib/corepercolate_test.go | 64 + .../mattbaird/elastigo/lib/coresearch_test.go | 83 + .../mattbaird/elastigo/lib/coretest_test.go | 198 + .../elastigo/lib/indicesdeletemapping_test.go | 54 + .../elastigo/lib/indicesputmapping_test.go | 356 + .../mattbaird/elastigo/lib/request_test.go | 200 + .../elastigo/lib/searchaggregate_test.go | 177 + .../elastigo/lib/searchfacet_test.go | 42 + .../elastigo/lib/searchfilter_test.go | 287 + .../elastigo/lib/searchhighlight_test.go | 67 + .../elastigo/lib/searchsearch_test.go | 307 + .../mattbaird/elastigo/lib/setup_test.go | 84 + .../mattbaird/elastigo/lib/shared_test.go | 43 + .../metrics20/go-metrics20/README.md | 8 + .../go-metrics20/carbon20/manipulate_test.go | 106 + .../go-metrics20/carbon20/validate_test.go | 148 + .../go-metrics20/carbon20/version_test.go | 99 + vendor/github.com/miekg/dns/.gitignore | 4 + vendor/github.com/miekg/dns/.travis.yml | 7 + vendor/github.com/miekg/dns/client_test.go | 469 + .../github.com/miekg/dns/clientconfig_test.go | 50 + vendor/github.com/miekg/dns/dns_bench_test.go | 211 + vendor/github.com/miekg/dns/dns_test.go | 452 + vendor/github.com/miekg/dns/dnssec_test.go | 733 ++ vendor/github.com/miekg/dns/dyn_test.go | 3 + vendor/github.com/miekg/dns/edns_test.go | 32 + vendor/github.com/miekg/dns/example_test.go | 146 + vendor/github.com/miekg/dns/fuzz_test.go | 25 + 
vendor/github.com/miekg/dns/issue_test.go | 23 + vendor/github.com/miekg/dns/labels_test.go | 200 + vendor/github.com/miekg/dns/nsecx_test.go | 29 + vendor/github.com/miekg/dns/parse_test.go | 1524 ++++ vendor/github.com/miekg/dns/privaterr_test.go | 170 + vendor/github.com/miekg/dns/remote_test.go | 19 + vendor/github.com/miekg/dns/sanitize_test.go | 85 + vendor/github.com/miekg/dns/server_test.go | 679 ++ vendor/github.com/miekg/dns/sig0_test.go | 89 + vendor/github.com/miekg/dns/tsig_test.go | 37 + vendor/github.com/miekg/dns/types_test.go | 42 + vendor/github.com/miekg/dns/update_test.go | 145 + vendor/github.com/miekg/dns/xfr_test.go | 161 + .../github.com/mitchellh/go-homedir/LICENSE | 21 + .../github.com/mitchellh/go-homedir/README.md | 14 + .../mitchellh/go-homedir/homedir.go | 137 + .../mitchellh/go-homedir/homedir_test.go | 112 + .../mreiferson/go-snappystream/.travis.yml | 13 + .../go-snappystream/fixturedata_test.go | 2701 ++++++ .../mreiferson/go-snappystream/reader_test.go | 650 ++ .../go-snappystream/readwrite_test.go | 425 + .../go-snappystream/snappy-go/snappy_test.go | 261 + .../mreiferson/go-snappystream/writer_test.go | 126 + vendor/github.com/nsqio/go-nsq/.travis.yml | 26 + .../github.com/nsqio/go-nsq/command_test.go | 18 + .../nsqio/go-nsq/config_flag_test.go | 25 + vendor/github.com/nsqio/go-nsq/config_test.go | 104 + .../github.com/nsqio/go-nsq/consumer_test.go | 237 + vendor/github.com/nsqio/go-nsq/mock_test.go | 472 + .../github.com/nsqio/go-nsq/producer_test.go | 366 + .../opentracing/opentracing-go/.gitignore | 13 + .../opentracing/opentracing-go/.travis.yml | 14 + .../opentracing-go/ext/tags_test.go | 148 + .../opentracing-go/gocontext_test.go | 81 + .../opentracing-go/log/field_test.go | 51 + .../opentracing-go/options_test.go | 31 + .../opentracing-go/propagation_test.go | 93 + .../opentracing-go/testtracer_test.go | 138 + .../github.com/philhofer/fwd/reader_test.go | 354 + .../github.com/philhofer/fwd/writer_test.go | 239 + vendor/github.com/pkg/errors/.gitignore | 24 + vendor/github.com/pkg/errors/.travis.yml | 11 + vendor/github.com/pkg/errors/LICENSE | 23 + vendor/github.com/pkg/errors/README.md | 52 + vendor/github.com/pkg/errors/appveyor.yml | 32 + vendor/github.com/pkg/errors/bench_test.go | 59 + vendor/github.com/pkg/errors/errors.go | 269 + vendor/github.com/pkg/errors/errors_test.go | 226 + vendor/github.com/pkg/errors/example_test.go | 205 + vendor/github.com/pkg/errors/format_test.go | 535 ++ vendor/github.com/pkg/errors/stack.go | 178 + vendor/github.com/pkg/errors/stack_test.go | 292 + .../github.com/raintank/dur/datetime_test.go | 137 + .../github.com/raintank/dur/duration_test.go | 40 + vendor/github.com/raintank/gziper/.gitignore | 24 + .../github.com/raintank/gziper/gzip_test.go | 164 + vendor/github.com/raintank/met/LICENSE | 661 ++ vendor/github.com/raintank/met/NOTICE | 15 + vendor/github.com/raintank/met/README.md | 15 + .../raintank/met/dogstatsd/count.go | 18 + .../raintank/met/dogstatsd/gauge.go | 51 + .../github.com/raintank/met/dogstatsd/init.go | 27 + .../raintank/met/dogstatsd/meter.go | 20 + .../raintank/met/dogstatsd/timer.go | 23 + .../github.com/raintank/met/helper/helper.go | 25 + vendor/github.com/raintank/met/interfaces.go | 35 + .../github.com/raintank/met/statsd/count.go | 18 + .../github.com/raintank/met/statsd/gauge.go | 52 + vendor/github.com/raintank/met/statsd/init.go | 14 + .../github.com/raintank/met/statsd/meter.go | 20 + .../github.com/raintank/met/statsd/timer.go | 23 + .../raintank/worldping-api/.bra.toml | 16 
+ .../raintank/worldping-api/.editorconfig | 12 + .../raintank/worldping-api/.gitignore | 28 + .../raintank/worldping-api/.gitmodules | 0 .../raintank/worldping-api/CHANGELOG.md | 3 + .../raintank/worldping-api/README.md | 10 + .../raintank/worldping-api/apiary.apib | 778 ++ .../raintank/worldping-api/circle.yml | 33 + .../github.com/raintank/worldping-api/main.go | 150 + .../github.com/raintank/worldping-api/test.sh | 36 + .../github.com/rakyll/globalconf/.travis.yml | 2 + .../rakyll/globalconf/globalconf_test.go | 267 + vendor/github.com/rakyll/goini/.gitignore | 8 + vendor/github.com/rakyll/goini/ini_test.go | 169 + vendor/github.com/rs/cors/.travis.yml | 4 + vendor/github.com/rs/cors/bench_test.go | 88 + vendor/github.com/rs/cors/cors_test.go | 720 ++ vendor/github.com/rs/cors/utils_test.go | 70 + vendor/github.com/rs/xhandler/.travis.yml | 7 + .../rs/xhandler/chain_example_test.go | 85 + vendor/github.com/rs/xhandler/chain_test.go | 209 + .../github.com/rs/xhandler/middleware_test.go | 88 + .../rs/xhandler/xhandler_example_test.go | 67 + .../github.com/rs/xhandler/xhandler_test.go | 61 + vendor/github.com/sean-/seed/.gitignore | 24 + vendor/github.com/sean-/seed/init_test.go | 26 + vendor/github.com/sergi/go-diff/.gitignore | 22 + vendor/github.com/sergi/go-diff/.travis.yml | 27 + .../sergi/go-diff/APACHE-LICENSE-2.0 | 177 + vendor/github.com/sergi/go-diff/AUTHORS | 25 + vendor/github.com/sergi/go-diff/CONTRIBUTORS | 32 + vendor/github.com/sergi/go-diff/Makefile | 44 + vendor/github.com/sergi/go-diff/README.md | 84 + .../go-diff/diffmatchpatch/benchutil_test.go | 28 + .../sergi/go-diff/diffmatchpatch/diff_test.go | 1427 +++ .../go-diff/diffmatchpatch/match_test.go | 174 + .../go-diff/diffmatchpatch/patch_test.go | 339 + .../go-diff/diffmatchpatch/stringutil_test.go | 116 + .../smartystreets/assertions/.gitignore | 3 + .../smartystreets/assertions/.travis.yml | 14 + .../assertions/collections_test.go | 157 + .../smartystreets/assertions/doc_test.go | 57 + .../smartystreets/assertions/equality_test.go | 269 + .../assertions/internal/Makefile | 23 + .../assertions/internal/go-render/.travis.yml | 21 + .../internal/go-render/PRESUBMIT.py | 109 + .../assertions/internal/go-render/README.md | 78 + .../assertions/internal/go-render/WATCHLISTS | 26 + .../internal/go-render/pre-commit-go.yml | 78 + .../internal/go-render/render/render_test.go | 170 + .../internal/oglematchers/.gitignore | 5 + .../internal/oglematchers/.travis.yml | 4 + .../internal/oglematchers/all_of_test.go | 110 + .../internal/oglematchers/any_of_test.go | 139 + .../internal/oglematchers/any_test.go | 53 + .../internal/oglematchers/contains_test.go | 233 + .../internal/oglematchers/deep_equals_test.go | 343 + .../oglematchers/elements_are_test.go | 208 + .../internal/oglematchers/equals_test.go | 3864 ++++++++ .../internal/oglematchers/error_test.go | 92 + .../oglematchers/greater_or_equal_test.go | 1101 +++ .../oglematchers/greater_than_test.go | 1077 +++ .../oglematchers/has_same_type_as_test.go | 181 + .../internal/oglematchers/has_substr_test.go | 93 + .../oglematchers/identical_to_test.go | 849 ++ .../oglematchers/less_or_equal_test.go | 1077 +++ .../internal/oglematchers/less_than_test.go | 1057 +++ .../oglematchers/matches_regexp_test.go | 92 + .../internal/oglematchers/not_test.go | 108 + .../internal/oglematchers/panics_test.go | 141 + .../internal/oglematchers/pointee_test.go | 152 + .../smartystreets/assertions/panic_test.go | 53 + .../smartystreets/assertions/quantity_test.go | 145 + 
.../assertions/serializer_test.go | 36 + .../smartystreets/assertions/strings_test.go | 118 + .../smartystreets/assertions/time_test.go | 159 + .../smartystreets/assertions/type_test.go | 76 + .../assertions/utilities_for_test.go | 75 + .../smartystreets/goconvey/.gitignore | 5 + .../smartystreets/goconvey/.travis.yml | 14 + .../smartystreets/goconvey/CONTRIBUTING.md | 22 + .../smartystreets/goconvey/README.md | 126 + .../goconvey/convey/focused_execution_test.go | 72 + .../goconvey/convey/gotest/doc_test.go | 1 + .../convey/isolated_execution_test.go | 774 ++ .../goconvey/convey/reporting/dot_test.go | 40 + .../goconvey/convey/reporting/gotest_test.go | 66 + .../goconvey/convey/reporting/printer_test.go | 181 + .../convey/reporting/problems_test.go | 51 + .../convey/reporting/reporter_test.go | 94 + .../goconvey/convey/reporting_hooks_test.go | 317 + .../goconvey/convey/story_conventions_test.go | 175 + .../smartystreets/goconvey/dependencies.go | 4 + .../smartystreets/goconvey/doc_test.go | 1 + .../smartystreets/goconvey/goconvey.go | 280 + .../github.com/syndtr/goleveldb/.travis.yml | 12 + vendor/github.com/syndtr/goleveldb/README.md | 105 + .../syndtr/goleveldb/leveldb/batch_test.go | 123 + .../syndtr/goleveldb/leveldb/bench_test.go | 509 ++ .../goleveldb/leveldb/cache/bench_test.go | 29 + .../goleveldb/leveldb/cache/cache_test.go | 553 ++ .../syndtr/goleveldb/leveldb/corrupt_test.go | 496 ++ .../syndtr/goleveldb/leveldb/db_test.go | 2907 ++++++ .../syndtr/goleveldb/leveldb/external_test.go | 117 + .../goleveldb/leveldb/filter/bloom_test.go | 142 + .../leveldb/iterator/array_iter_test.go | 30 + .../leveldb/iterator/indexed_iter_test.go | 83 + .../leveldb/iterator/iter_suite_test.go | 11 + .../leveldb/iterator/merged_iter_test.go | 60 + .../goleveldb/leveldb/journal/journal_test.go | 818 ++ .../syndtr/goleveldb/leveldb/key_test.go | 133 + .../goleveldb/leveldb/leveldb_suite_test.go | 11 + .../goleveldb/leveldb/memdb/bench_test.go | 75 + .../leveldb/memdb/memdb_suite_test.go | 11 + .../goleveldb/leveldb/memdb/memdb_test.go | 135 + .../goleveldb/leveldb/session_record_test.go | 62 + .../leveldb/storage/file_storage_test.go | 176 + .../leveldb/storage/mem_storage_test.go | 65 + .../goleveldb/leveldb/table/block_test.go | 139 + .../leveldb/table/table_suite_test.go | 11 + .../goleveldb/leveldb/table/table_test.go | 123 + .../syndtr/goleveldb/leveldb/testutil_test.go | 91 + .../goleveldb/leveldb/util/buffer_test.go | 369 + .../goleveldb/leveldb/util/hash_test.go | 46 + .../syndtr/goleveldb/leveldb/version_test.go | 181 + vendor/github.com/tinylib/msgp/.gitignore | 5 + vendor/github.com/tinylib/msgp/.travis.yml | 12 + vendor/github.com/tinylib/msgp/Makefile | 55 + vendor/github.com/tinylib/msgp/README.md | 104 + vendor/github.com/tinylib/msgp/main.go | 119 + .../github.com/tinylib/msgp/msgp/defs_test.go | 12 + .../github.com/tinylib/msgp/msgp/edit_test.go | 200 + .../tinylib/msgp/msgp/extension_test.go | 49 + .../github.com/tinylib/msgp/msgp/file_test.go | 103 + .../tinylib/msgp/msgp/floatbench_test.go | 25 + .../tinylib/msgp/msgp/json_bytes_test.go | 121 + .../github.com/tinylib/msgp/msgp/json_test.go | 142 + .../tinylib/msgp/msgp/number_test.go | 94 + .../github.com/tinylib/msgp/msgp/raw_test.go | 85 + .../tinylib/msgp/msgp/read_bytes_test.go | 518 ++ .../github.com/tinylib/msgp/msgp/read_test.go | 724 ++ .../tinylib/msgp/msgp/write_bytes_test.go | 319 + .../tinylib/msgp/msgp/write_test.go | 405 + .../uber/jaeger-client-go/.gitignore | 15 + .../uber/jaeger-client-go/.gitmodules | 3 + 
.../uber/jaeger-client-go/.travis.yml | 44 + .../uber/jaeger-client-go/CHANGELOG.md | 16 +- .../uber/jaeger-client-go/CONTRIBUTING.md | 155 +- vendor/github.com/uber/jaeger-client-go/DCO | 37 + .../github.com/uber/jaeger-client-go/LICENSE | 222 +- .../github.com/uber/jaeger-client-go/Makefile | 11 +- .../uber/jaeger-client-go/README.md | 60 +- .../uber/jaeger-client-go/baggage_setter.go | 26 +- .../jaeger-client-go/baggage_setter_test.go | 126 + .../uber/jaeger-client-go/config/config.go | 31 +- .../jaeger-client-go/config/config_test.go | 259 + .../jaeger-client-go/config/example_test.go | 84 + .../uber/jaeger-client-go/config/options.go | 32 +- .../jaeger-client-go/config/options_test.go | 72 + .../uber/jaeger-client-go/constants.go | 32 +- .../uber/jaeger-client-go/constants_test.go | 29 + .../uber/jaeger-client-go/context.go | 28 +- .../uber/jaeger-client-go/context_test.go | 110 + .../uber/jaeger-client-go/contrib_observer.go | 24 +- .../github.com/uber/jaeger-client-go/doc.go | 26 +- .../uber/jaeger-client-go/glide.lock | 58 +- .../uber/jaeger-client-go/glide.yaml | 4 + .../uber/jaeger-client-go/header.go | 24 +- .../uber/jaeger-client-go/header_test.go | 50 + .../internal/baggage/remote/options.go | 24 +- .../baggage/remote/restriction_manager.go | 26 +- .../remote/restriction_manager_test.go | 220 + .../internal/baggage/restriction_manager.go | 33 +- .../baggage/restriction_manager_test.go | 29 + .../jaeger-client-go/internal/spanlog/json.go | 28 +- .../uber/jaeger-client-go/interop.go | 26 +- .../uber/jaeger-client-go/jaeger_tag.go | 24 +- .../jaeger-client-go/jaeger_thrift_span.go | 24 +- .../jaeger_thrift_span_test.go | 388 + .../uber/jaeger-client-go/log/logger.go | 26 +- .../uber/jaeger-client-go/log/logger_test.go | 26 + .../uber/jaeger-client-go/logger.go | 26 +- .../uber/jaeger-client-go/logger_test.go | 40 + .../uber/jaeger-client-go/metrics.go | 74 +- .../uber/jaeger-client-go/metrics_test.go | 48 + .../uber/jaeger-client-go/observer.go | 24 +- .../uber/jaeger-client-go/observer_test.go | 109 + .../uber/jaeger-client-go/propagation.go | 26 +- .../uber/jaeger-client-go/propagation_test.go | 267 + .../uber/jaeger-client-go/reference.go | 24 +- .../uber/jaeger-client-go/reporter.go | 26 +- .../uber/jaeger-client-go/reporter_options.go | 24 +- .../uber/jaeger-client-go/reporter_test.go | 272 + .../uber/jaeger-client-go/rpcmetrics/doc.go | 24 +- .../jaeger-client-go/rpcmetrics/endpoints.go | 24 +- .../rpcmetrics/endpoints_test.go | 43 + .../jaeger-client-go/rpcmetrics/metrics.go | 24 +- .../rpcmetrics/metrics_test.go | 61 + .../jaeger-client-go/rpcmetrics/normalizer.go | 24 +- .../rpcmetrics/normalizer_test.go | 34 + .../jaeger-client-go/rpcmetrics/observer.go | 24 +- .../rpcmetrics/observer_test.go | 177 + .../uber/jaeger-client-go/sampler.go | 27 +- .../uber/jaeger-client-go/sampler_options.go | 24 +- .../uber/jaeger-client-go/sampler_test.go | 691 ++ .../github.com/uber/jaeger-client-go/span.go | 42 +- .../uber/jaeger-client-go/span_test.go | 90 + .../uber/jaeger-client-go/thrift/.nocover | 0 .../uber/jaeger-client-go/tracer.go | 51 +- .../uber/jaeger-client-go/tracer_options.go | 38 +- .../uber/jaeger-client-go/tracer_test.go | 360 + .../uber/jaeger-client-go/transport.go | 26 +- .../uber/jaeger-client-go/transport_udp.go | 26 +- .../jaeger-client-go/transport_udp_test.go | 221 + .../uber/jaeger-client-go/utils/http_json.go | 28 +- .../jaeger-client-go/utils/http_json_test.go | 58 + .../uber/jaeger-client-go/utils/localip.go | 28 +- .../uber/jaeger-client-go/utils/rand.go 
 | 28 +-
 .../jaeger-client-go/utils/rate_limiter.go | 26 +-
 .../utils/rate_limiter_test.go | 75 +
 .../uber/jaeger-client-go/utils/udp_client.go | 26 +-
 .../uber/jaeger-client-go/utils/utils.go | 28 +-
 .../uber/jaeger-client-go/utils/utils_test.go | 91 +
 .../uber/jaeger-client-go/zipkin.go | 24 +-
 .../uber/jaeger-client-go/zipkin_test.go | 68 +
 .../jaeger-client-go/zipkin_thrift_span.go | 26 +-
 .../zipkin_thrift_span_test.go | 329 +
 vendor/github.com/uber/jaeger-lib/.gitignore | 11 +
 vendor/github.com/uber/jaeger-lib/.travis.yml | 21 +
 .../github.com/uber/jaeger-lib/CHANGELOG.md | 25 +
 .../uber/jaeger-lib/CONTRIBUTING.md | 163 +
 vendor/github.com/uber/jaeger-lib/DCO | 37 +
 vendor/github.com/uber/jaeger-lib/LICENSE | 222 +-
 vendor/github.com/uber/jaeger-lib/Makefile | 84 +
 vendor/github.com/uber/jaeger-lib/README.md | 22 +
 vendor/github.com/uber/jaeger-lib/glide.lock | 78 +
 vendor/github.com/uber/jaeger-lib/glide.yaml | 13 +
 .../uber/jaeger-lib/metrics/counter.go | 24 +-
 .../uber/jaeger-lib/metrics/factory.go | 24 +-
 .../uber/jaeger-lib/metrics/gauge.go | 24 +-
 .../uber/jaeger-lib/metrics/local.go | 24 +-
 .../uber/jaeger-lib/metrics/local_test.go | 116 +
 .../uber/jaeger-lib/metrics/metrics.go | 24 +-
 .../uber/jaeger-lib/metrics/metrics_test.go | 89 +
 .../uber/jaeger-lib/metrics/stopwatch.go | 24 +-
 .../uber/jaeger-lib/metrics/timer.go | 24 +-
 vendor/golang.org/x/net/.gitattributes | 10 +
 vendor/golang.org/x/net/.gitignore | 2 +
 vendor/golang.org/x/net/AUTHORS | 3 +
 vendor/golang.org/x/net/CONTRIBUTING.md | 31 +
 vendor/golang.org/x/net/CONTRIBUTORS | 3 +
 vendor/golang.org/x/net/README | 3 +
 vendor/golang.org/x/net/codereview.cfg | 1 +
 .../golang.org/x/net/context/context_test.go | 577 ++
 .../x/net/context/ctxhttp/ctxhttp.go | 146 +
 .../x/net/context/ctxhttp/ctxhttp_test.go | 176 +
 .../x/net/context/withtimeout_test.go | 26 +
 vendor/golang.org/x/net/proxy/direct.go | 18 +
 vendor/golang.org/x/net/proxy/per_host.go | 140 +
 .../golang.org/x/net/proxy/per_host_test.go | 55 +
 vendor/golang.org/x/net/proxy/proxy.go | 94 +
 vendor/golang.org/x/net/proxy/proxy_test.go | 142 +
 vendor/golang.org/x/net/proxy/socks5.go | 210 +
 vendor/golang.org/x/sys/.gitattributes | 10 +
 vendor/golang.org/x/sys/.gitignore | 2 +
 vendor/golang.org/x/sys/AUTHORS | 3 +
 vendor/golang.org/x/sys/CONTRIBUTING.md | 31 +
 vendor/golang.org/x/sys/CONTRIBUTORS | 3 +
 .../go-uuid => golang.org/x/sys}/LICENSE | 2 +-
 vendor/golang.org/x/sys/PATENTS | 22 +
 vendor/golang.org/x/sys/README.md | 18 +
 vendor/golang.org/x/sys/codereview.cfg | 1 +
 .../x/sys/windows/asm_windows_386.s | 13 +
 .../x/sys/windows/asm_windows_amd64.s | 13 +
 .../golang.org/x/sys/windows/dll_windows.go | 378 +
 vendor/golang.org/x/sys/windows/env_unset.go | 15 +
 .../golang.org/x/sys/windows/env_windows.go | 25 +
 vendor/golang.org/x/sys/windows/eventlog.go | 20 +
 .../golang.org/x/sys/windows/exec_windows.go | 97 +
 .../x/sys/windows/memory_windows.go | 26 +
 vendor/golang.org/x/sys/windows/mksyscall.go | 7 +
 vendor/golang.org/x/sys/windows/race.go | 30 +
 vendor/golang.org/x/sys/windows/race0.go | 25 +
 .../x/sys/windows/security_windows.go | 476 +
 vendor/golang.org/x/sys/windows/service.go | 164 +
 vendor/golang.org/x/sys/windows/str.go | 22 +
 vendor/golang.org/x/sys/windows/syscall.go | 71 +
 .../golang.org/x/sys/windows/syscall_test.go | 53 +
 .../x/sys/windows/syscall_windows.go | 1153 +++
 .../x/sys/windows/syscall_windows_test.go | 107 +
 .../golang.org/x/sys/windows/types_windows.go | 1333 +++
 .../x/sys/windows/types_windows_386.go | 22 +
 .../x/sys/windows/types_windows_amd64.go | 22 +
 .../x/sys/windows/zsyscall_windows.go | 2687 ++++++
 .../gopkg.in/alexcesaro/statsd.v1/.travis.yml | 9 +
 vendor/gopkg.in/alexcesaro/statsd.v1/LICENSE | 20 +
 .../gopkg.in/alexcesaro/statsd.v1/README.md | 49 +
 vendor/gopkg.in/alexcesaro/statsd.v1/doc.go | 17 +
 .../alexcesaro/statsd.v1/examples_test.go | 82 +
 .../gopkg.in/alexcesaro/statsd.v1/statsd.go | 453 +
 .../alexcesaro/statsd.v1/statsd_test.go | 466 +
 vendor/gopkg.in/inf.v0/benchmark_test.go | 210 +
 vendor/gopkg.in/inf.v0/dec_go1_2_test.go | 33 +
 vendor/gopkg.in/inf.v0/dec_internal_test.go | 40 +
 vendor/gopkg.in/inf.v0/dec_test.go | 379 +
 vendor/gopkg.in/inf.v0/example_test.go | 62 +
 .../gopkg.in/inf.v0/rounder_example_test.go | 72 +
 vendor/gopkg.in/inf.v0/rounder_test.go | 109 +
 vendor/gopkg.in/ini.v1/.gitignore | 5 +
 vendor/gopkg.in/ini.v1/.travis.yml | 16 +
 vendor/gopkg.in/ini.v1/ini_test.go | 401 +
 vendor/gopkg.in/ini.v1/key_test.go | 537 ++
 vendor/gopkg.in/ini.v1/section_test.go | 47 +
 vendor/gopkg.in/ini.v1/struct_test.go | 323 +
 vendor/gopkg.in/macaron.v1/.gitignore | 3 +
 vendor/gopkg.in/macaron.v1/.travis.yml | 13 +
 vendor/gopkg.in/macaron.v1/context_test.go | 375 +
 vendor/gopkg.in/macaron.v1/logger_test.go | 67 +
 vendor/gopkg.in/macaron.v1/macaron_test.go | 218 +
 vendor/gopkg.in/macaron.v1/recovery_test.go | 74 +
 vendor/gopkg.in/macaron.v1/render_test.go | 738 ++
 .../macaron.v1/response_writer_test.go | 188 +
 .../macaron.v1/return_handler_test.go | 102 +
 vendor/gopkg.in/macaron.v1/router_test.go | 309 +
 vendor/gopkg.in/macaron.v1/static_test.go | 246 +
 .../raintank/schema.v0/event_gen_test.go | 125 +
 .../raintank/schema.v0/metric_gen_test.go | 351 +
 .../metric_serialization_bench_test.go | 144 +
 .../raintank/schema.v0/reslice_test.go | 66 +
 vendor/gopkg.in/raintank/schema.v1/event.go | 36 -
 .../gopkg.in/raintank/schema.v1/event_gen.go | 319 -
 vendor/gopkg.in/raintank/schema.v1/metric.go | 5 +-
 .../raintank/schema.v1/metric_gen_test.go | 351 +
 .../metric_serialization_bench_test.go | 144 +
 .../raintank/schema.v1/metric_test.go | 135 +
 .../gopkg.in/raintank/schema.v1/msg/format.go | 5 -
 vendor/gopkg.in/raintank/schema.v1/msg/msg.go | 111 -
 .../raintank/schema.v1/point_gen_test.go | 125 +
 .../raintank/schema.v1/reslice_test.go | 66 +
 vendor/vendor.json | 703 --
 1180 files changed, 181284 insertions(+), 4701 deletions(-)
 create mode 100644 Gopkg.lock
 create mode 100644 Gopkg.toml
 create mode 100644 vendor/github.com/DataDog/datadog-go/.travis.yml
 create mode 100644 vendor/github.com/DataDog/datadog-go/CHANGELOG.md
 create mode 100644 vendor/github.com/DataDog/datadog-go/LICENSE.txt
 create mode 100644 vendor/github.com/DataDog/datadog-go/README.md
 create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/README.md
 create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/statsd.go
 create mode 100644 vendor/github.com/DataDog/datadog-go/statsd/statsd_test.go
 create mode 100644 vendor/github.com/Dieterbe/artisanalhistogram/README.md
 create mode 100644 vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s_test.go
 create mode 100644 vendor/github.com/Dieterbe/profiletrigger/README.md
 create mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore
 create mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE
 create mode 100644 vendor/github.com/Microsoft/go-winio/README.md
 create mode 100644 vendor/github.com/Microsoft/go-winio/backup.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/backup_test.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/ea.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/ea_test.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/file.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/pipe_test.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/privileges_test.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/sd.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/sd_test.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go
 create mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
 create mode 100644 vendor/github.com/Shopify/sarama/.gitignore
 create mode 100644 vendor/github.com/Shopify/sarama/.travis.yml
 create mode 100644 vendor/github.com/Shopify/sarama/api_versions_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/api_versions_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/async_producer_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/broker_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/client_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/config_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/consumer_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/fetch_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/fetch_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/functional_client_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/functional_consumer_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/functional_offset_manager_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/functional_producer_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/functional_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/join_group_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/join_group_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/leave_group_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/leave_group_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/list_groups_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/list_groups_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/message_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/metadata_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/metadata_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_manager_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/offset_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/partitioner_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/produce_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/produce_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/produce_set_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/response_header_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/sync_group_request_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/sync_group_response_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/sync_producer_test.go
 create mode 100644 vendor/github.com/Shopify/sarama/utils_test.go
 create mode 100644 vendor/github.com/Sirupsen/logrus/.gitignore
 create mode 100644 vendor/github.com/Sirupsen/logrus/.travis.yml
 create mode 100644 vendor/github.com/Sirupsen/logrus/entry_test.go
 create mode 100644 vendor/github.com/Sirupsen/logrus/formatter_bench_test.go
 create mode 100644 vendor/github.com/Sirupsen/logrus/hook_test.go
 create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter_test.go
 create mode 100644 vendor/github.com/Sirupsen/logrus/logrus_test.go
 create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter_test.go
 create mode 100644 vendor/github.com/Unknwon/com/.gitignore
 create mode 100644 vendor/github.com/Unknwon/com/.travis.yml
 create mode 100644 vendor/github.com/Unknwon/com/cmd_test.go
 create mode 100644 vendor/github.com/Unknwon/com/convert_test.go
 create mode 100644 vendor/github.com/Unknwon/com/dir_test.go
 create mode 100644 vendor/github.com/Unknwon/com/example_test.go
 create mode 100644 vendor/github.com/Unknwon/com/file_test.go
 create mode 100644 vendor/github.com/Unknwon/com/html_test.go
 create mode 100644 vendor/github.com/Unknwon/com/http_test.go
 create mode 100644 vendor/github.com/Unknwon/com/math_test.go
 create mode 100644 vendor/github.com/Unknwon/com/path_test.go
 create mode 100644 vendor/github.com/Unknwon/com/regex_test.go
 create mode 100644 vendor/github.com/Unknwon/com/slice_test.go
 create mode 100644 vendor/github.com/Unknwon/com/string_test.go
 create mode 100644 vendor/github.com/alyu/configparser/configparser_test.go
 create mode 100644 vendor/github.com/alyu/configparser/example_test.go
 create mode 100644 vendor/github.com/apache/thrift/.clang-format
 create mode 100644 vendor/github.com/apache/thrift/.dockerignore
 create mode 100755 vendor/github.com/apache/thrift/.editorconfig
 create mode 100644 vendor/github.com/apache/thrift/.gitattributes
 create mode 100644 vendor/github.com/apache/thrift/.gitignore
 create mode 100644 vendor/github.com/apache/thrift/.travis.yml
 create mode 100644 vendor/github.com/apache/thrift/CHANGES
 create mode 100644 vendor/github.com/apache/thrift/CMakeLists.txt
 create mode 100644 vendor/github.com/apache/thrift/CONTRIBUTING.md
 create mode 100644 vendor/github.com/apache/thrift/Dockerfile
 create mode 100755 vendor/github.com/apache/thrift/Makefile.am
 create mode 100644 vendor/github.com/apache/thrift/README.md
 create mode 100644 vendor/github.com/apache/thrift/Thrift.podspec
 create mode 100755 vendor/github.com/apache/thrift/appveyor.yml
 create mode 100755 vendor/github.com/apache/thrift/bootstrap.sh
 create mode 100644 vendor/github.com/apache/thrift/bower.json
 create mode 100755 vendor/github.com/apache/thrift/cleanup.sh
 create mode 100644 vendor/github.com/apache/thrift/composer.json
 create mode 100755 vendor/github.com/apache/thrift/configure.ac
 create mode 100755 vendor/github.com/apache/thrift/doap.rdf
 create mode 100644 vendor/github.com/apache/thrift/lib/Makefile.am
 create mode 100644 vendor/github.com/apache/thrift/lib/go/Makefile.am
 create mode 100644 vendor/github.com/apache/thrift/lib/go/README.md
 create mode 100644 vendor/github.com/apache/thrift/lib/go/coding_standards.md
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/exception_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/framed_transport_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_client_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/lowlevel_benchmarks_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/rich_transport_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/serializer_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/serializer_types_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_socket_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_exception_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_test.go
 create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport_test.go
 create mode 100644 vendor/github.com/apache/thrift/package.json
 create mode 100755 vendor/github.com/apache/thrift/sonar-project.properties
 create mode 100644 vendor/github.com/araddon/gou/.gitignore
 create mode 100644 vendor/github.com/araddon/gou/coerce_test.go
 create mode 100644 vendor/github.com/araddon/gou/jsonhelper_test.go
 create mode 100644 vendor/github.com/araddon/gou/uid_test.go
 create mode 100755 vendor/github.com/armon/go-metrics/.gitignore
 create mode 100644 vendor/github.com/armon/go-metrics/inmem_signal_test.go
 create mode 100644 vendor/github.com/armon/go-metrics/inmem_test.go
 create mode 100644 vendor/github.com/armon/go-metrics/metrics_test.go
 create mode 100755 vendor/github.com/armon/go-metrics/sink_test.go
 create mode 100755 vendor/github.com/armon/go-metrics/start_test.go
 create mode 100644 vendor/github.com/armon/go-metrics/statsd_test.go
 create mode 100755 vendor/github.com/armon/go-metrics/statsite_test.go
 create mode 100644 vendor/github.com/bitly/go-hostpool/.gitignore
 create mode 100644 vendor/github.com/bitly/go-hostpool/.travis.yml
 create mode 100644 vendor/github.com/bitly/go-hostpool/example_test.go
 create mode 100644 vendor/github.com/bitly/go-hostpool/hostpool_test.go
 create mode 100644 vendor/github.com/bsm/sarama-cluster/.gitignore
 create mode 100644 vendor/github.com/bsm/sarama-cluster/.travis.yml
 create mode 100644 vendor/github.com/bsm/sarama-cluster/balancer_test.go
 create mode 100644 vendor/github.com/bsm/sarama-cluster/cluster_test.go
 create mode 100644 vendor/github.com/bsm/sarama-cluster/config_test.go
 create mode 100644 vendor/github.com/bsm/sarama-cluster/consumer_test.go
 create mode 100644 vendor/github.com/bsm/sarama-cluster/partitions_test.go
 create mode 100644 vendor/github.com/codahale/hdrhistogram/.travis.yml
 create mode 100644 vendor/github.com/codahale/hdrhistogram/hdr_test.go
 create mode 100644 vendor/github.com/codahale/hdrhistogram/window_test.go
 delete mode 100755 vendor/github.com/codeskyblue/go-uuid/dce.go
 delete mode 100755 vendor/github.com/codeskyblue/go-uuid/doc.go
 delete mode 100644 vendor/github.com/codeskyblue/go-uuid/hash.go
 delete mode 100755 vendor/github.com/codeskyblue/go-uuid/node.go
 delete mode 100755 vendor/github.com/codeskyblue/go-uuid/time.go
 delete mode 100644 vendor/github.com/codeskyblue/go-uuid/util.go
 delete mode 100755 vendor/github.com/codeskyblue/go-uuid/uuid.go
 delete mode 100644 vendor/github.com/codeskyblue/go-uuid/version1.go
 delete mode 100644 vendor/github.com/codeskyblue/go-uuid/version4.go
 create mode 100644 vendor/github.com/davecgh/go-spew/.gitignore
 create mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml
 create mode 100644 vendor/github.com/davecgh/go-spew/README.md
 create mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go
 create mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt
 create mode 100644 vendor/github.com/dgryski/go-bits/bits_test.go
 create mode 100644 vendor/github.com/dgryski/go-linlog/linlog_test.go
 create mode 100644 vendor/github.com/dgryski/go-tsz/.gitignore
 create mode 100644 vendor/github.com/dgryski/go-tsz/tsz_test.go
 create mode 100644 vendor/github.com/docker/distribution/.gitignore
 create mode 100644 vendor/github.com/docker/distribution/.mailmap
 create mode 100644 vendor/github.com/docker/distribution/AUTHORS
 create mode 100644 vendor/github.com/docker/distribution/BUILDING.md
 create mode 100644 vendor/github.com/docker/distribution/CHANGELOG.md
 create mode 100644 vendor/github.com/docker/distribution/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/distribution/Dockerfile
 create mode 100644 vendor/github.com/docker/distribution/LICENSE
 create mode 100644 vendor/github.com/docker/distribution/MAINTAINERS
 create mode 100644 vendor/github.com/docker/distribution/Makefile
 create mode 100644 vendor/github.com/docker/distribution/README.md
 create mode 100644 vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
 create mode 100644 vendor/github.com/docker/distribution/ROADMAP.md
 create mode 100644 vendor/github.com/docker/distribution/blobs.go
 create mode 100644 vendor/github.com/docker/distribution/circle.yml
 create mode 100755 vendor/github.com/docker/distribution/coverpkg.sh
 create mode 100644 vendor/github.com/docker/distribution/digest/digest.go
 create mode 100644 vendor/github.com/docker/distribution/digest/digest_test.go
 create mode 100644 vendor/github.com/docker/distribution/digest/digester.go
 create mode 100644 vendor/github.com/docker/distribution/digest/digester_resumable_test.go
 create mode 100644 vendor/github.com/docker/distribution/digest/doc.go
 create mode 100644 vendor/github.com/docker/distribution/digest/set.go
 create mode 100644 vendor/github.com/docker/distribution/digest/set_test.go
 create mode 100644 vendor/github.com/docker/distribution/digest/verifiers.go
 create mode 100644 vendor/github.com/docker/distribution/digest/verifiers_test.go
 create mode 100644 vendor/github.com/docker/distribution/doc.go
 create mode 100644 vendor/github.com/docker/distribution/errors.go
 create mode 100644 vendor/github.com/docker/distribution/manifests.go
 create mode 100644 vendor/github.com/docker/distribution/reference/reference.go
 create mode 100644 vendor/github.com/docker/distribution/reference/reference_test.go
 create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go
 create mode 100644 vendor/github.com/docker/distribution/reference/regexp_test.go
 create mode 100644 vendor/github.com/docker/distribution/registry.go
 create mode 100644 vendor/github.com/docker/distribution/tags.go
 create mode 100644 vendor/github.com/docker/docker/.dockerignore
 create mode 100644 vendor/github.com/docker/docker/.gitignore
 create mode 100644 vendor/github.com/docker/docker/.mailmap
 create mode 100644 vendor/github.com/docker/docker/AUTHORS
 create mode 100644 vendor/github.com/docker/docker/CHANGELOG.md
 create mode 100644 vendor/github.com/docker/docker/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/docker/Dockerfile
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.aarch64
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.armhf
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.ppc64le
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.s390x
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.simple
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.solaris
 create mode 100644 vendor/github.com/docker/docker/Dockerfile.windows
 create mode 100644 vendor/github.com/docker/docker/MAINTAINERS
 create mode 100644 vendor/github.com/docker/docker/Makefile
 create mode 100644 vendor/github.com/docker/docker/README.md
 create mode 100644 vendor/github.com/docker/docker/ROADMAP.md
 create mode 100644 vendor/github.com/docker/docker/VENDORING.md
 create mode 100644 vendor/github.com/docker/docker/VERSION
 create mode 100644 vendor/github.com/docker/docker/api/README.md
 create mode 100644 vendor/github.com/docker/docker/api/common.go
 create mode 100644 vendor/github.com/docker/docker/api/common_test.go
 create mode 100644 vendor/github.com/docker/docker/api/common_unix.go
 create mode 100644 vendor/github.com/docker/docker/api/common_windows.go
 create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml
 create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml
 delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_changes.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go
 create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse_test.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go
 create mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference.go
 create mode 100644 vendor/github.com/docker/docker/api/types/reference/image_reference_test.go
 create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice_test.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go
 delete mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto
 create mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert_test.go
 create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp_test.go
 create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare_test.go
 create mode 100644 vendor/github.com/docker/docker/cli/cobra.go
 create mode 100644 vendor/github.com/docker/docker/cli/error.go
 create mode 100644 vendor/github.com/docker/docker/cli/required.go
 create mode 100644 vendor/github.com/docker/docker/client/README.md
 create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go
 create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go
 create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete_test.go
 create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go
 create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/client.go
 create mode 100644 vendor/github.com/docker/docker/client/client_mock_test.go
 create mode 100644 vendor/github.com/docker/docker/client/client_test.go
 create mode 100644 vendor/github.com/docker/docker/client/client_unix.go
 create mode 100644 vendor/github.com/docker/docker/client/client_windows.go
 create mode 100644 vendor/github.com/docker/docker/client/container_attach.go
 create mode 100644 vendor/github.com/docker/docker/client/container_commit.go
 create mode 100644 vendor/github.com/docker/docker/client/container_commit_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_copy.go
 create mode 100644 vendor/github.com/docker/docker/client/container_copy_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_create.go
 create mode 100644 vendor/github.com/docker/docker/client/container_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_diff.go
 create mode 100644 vendor/github.com/docker/docker/client/container_diff_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_exec.go
 create mode 100644 vendor/github.com/docker/docker/client/container_exec_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_export.go
 create mode 100644 vendor/github.com/docker/docker/client/container_export_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/container_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_kill.go
 create mode 100644 vendor/github.com/docker/docker/client/container_kill_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_list.go
 create mode 100644 vendor/github.com/docker/docker/client/container_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_logs.go
 create mode 100644 vendor/github.com/docker/docker/client/container_logs_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_pause.go
 create mode 100644 vendor/github.com/docker/docker/client/container_pause_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_prune.go
 create mode 100644 vendor/github.com/docker/docker/client/container_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/container_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_rename.go
 create mode 100644 vendor/github.com/docker/docker/client/container_rename_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_resize.go
 create mode 100644 vendor/github.com/docker/docker/client/container_resize_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_restart.go
 create mode 100644 vendor/github.com/docker/docker/client/container_restart_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_start.go
 create mode 100644 vendor/github.com/docker/docker/client/container_start_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_stats.go
 create mode 100644 vendor/github.com/docker/docker/client/container_stats_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_stop.go
 create mode 100644 vendor/github.com/docker/docker/client/container_stop_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_top.go
 create mode 100644 vendor/github.com/docker/docker/client/container_top_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_unpause.go
 create mode 100644 vendor/github.com/docker/docker/client/container_unpause_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_update.go
 create mode 100644 vendor/github.com/docker/docker/client/container_update_test.go
 create mode 100644 vendor/github.com/docker/docker/client/container_wait.go
 create mode 100644 vendor/github.com/docker/docker/client/container_wait_test.go
 create mode 100644 vendor/github.com/docker/docker/client/disk_usage.go
 create mode 100644 vendor/github.com/docker/docker/client/errors.go
 create mode 100644 vendor/github.com/docker/docker/client/events.go
 create mode 100644 vendor/github.com/docker/docker/client/events_test.go
 create mode 100644 vendor/github.com/docker/docker/client/hijack.go
 create mode 100644 vendor/github.com/docker/docker/client/image_build.go
 create mode 100644 vendor/github.com/docker/docker/client/image_build_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_create.go
 create mode 100644 vendor/github.com/docker/docker/client/image_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_history.go
 create mode 100644 vendor/github.com/docker/docker/client/image_history_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_import.go
 create mode 100644 vendor/github.com/docker/docker/client/image_import_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/image_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_list.go
 create mode 100644 vendor/github.com/docker/docker/client/image_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_load.go
 create mode 100644 vendor/github.com/docker/docker/client/image_load_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_prune.go
 create mode 100644 vendor/github.com/docker/docker/client/image_pull.go
 create mode 100644 vendor/github.com/docker/docker/client/image_pull_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_push.go
 create mode 100644 vendor/github.com/docker/docker/client/image_push_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/image_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_save.go
 create mode 100644 vendor/github.com/docker/docker/client/image_save_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_search.go
 create mode 100644 vendor/github.com/docker/docker/client/image_search_test.go
 create mode 100644 vendor/github.com/docker/docker/client/image_tag.go
 create mode 100644 vendor/github.com/docker/docker/client/image_tag_test.go
 create mode 100644 vendor/github.com/docker/docker/client/info.go
 create mode 100644 vendor/github.com/docker/docker/client/info_test.go
 create mode 100644 vendor/github.com/docker/docker/client/interface.go
 create mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go
 create mode 100644 vendor/github.com/docker/docker/client/interface_stable.go
 create mode 100644 vendor/github.com/docker/docker/client/login.go
 create mode 100644 vendor/github.com/docker/docker/client/network_connect.go
 create mode 100644 vendor/github.com/docker/docker/client/network_connect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/network_create.go
 create mode 100644 vendor/github.com/docker/docker/client/network_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go
 create mode 100644 vendor/github.com/docker/docker/client/network_disconnect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/network_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/network_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/network_list.go
 create mode 100644 vendor/github.com/docker/docker/client/network_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/network_prune.go
 create mode 100644 vendor/github.com/docker/docker/client/network_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/network_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/node_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/node_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/node_list.go
 create mode 100644 vendor/github.com/docker/docker/client/node_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/node_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/node_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/node_update.go
 create mode 100644 vendor/github.com/docker/docker/client/node_update_test.go
 create mode 100644 vendor/github.com/docker/docker/client/ping.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_disable_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_enable_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_install.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_list.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_push.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_push_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_set.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_set_test.go
 create mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go
 create mode 100644 vendor/github.com/docker/docker/client/request.go
 create mode 100644 vendor/github.com/docker/docker/client/request_test.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_create.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_list.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_update.go
 create mode 100644 vendor/github.com/docker/docker/client/secret_update_test.go
 create mode 100644 vendor/github.com/docker/docker/client/service_create.go
 create mode 100644 vendor/github.com/docker/docker/client/service_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/service_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/service_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/service_list.go
 create mode 100644 vendor/github.com/docker/docker/client/service_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/service_logs.go
 create mode 100644 vendor/github.com/docker/docker/client/service_logs_test.go
 create mode 100644 vendor/github.com/docker/docker/client/service_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/service_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/client/service_update.go
 create mode 100644 vendor/github.com/docker/docker/client/service_update_test.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_init.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_init_test.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_join.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_join_test.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_leave_test.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_update.go
 create mode 100644 vendor/github.com/docker/docker/client/swarm_update_test.go
 create mode 100644 vendor/github.com/docker/docker/client/task_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/task_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/task_list.go
 create mode 100644 vendor/github.com/docker/docker/client/task_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/transport.go
 create mode 100644 vendor/github.com/docker/docker/client/utils.go
 create mode 100644 vendor/github.com/docker/docker/client/version.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_create.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_create_test.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_inspect_test.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_list.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_list_test.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_prune.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_remove.go
 create mode 100644 vendor/github.com/docker/docker/client/volume_remove_test.go
 create mode 100644 vendor/github.com/docker/docker/pkg/README.md
 create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go
 create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go
 create mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go
 create mode 100644 vendor/github.com/docker/docker/poule.yml
 create mode 100644 vendor/github.com/docker/docker/vendor.conf
 create mode 100644 vendor/github.com/docker/go-connections/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/go-connections/LICENSE
 create mode 100644 vendor/github.com/docker/go-connections/MAINTAINERS
 create mode 100644 vendor/github.com/docker/go-connections/README.md
 create mode 100644 vendor/github.com/docker/go-connections/circle.yml
 create mode 100644 vendor/github.com/docker/go-connections/doc.go
 create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go
 create mode 100644 vendor/github.com/docker/go-connections/nat/nat_test.go
 create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go
 create mode 100644 vendor/github.com/docker/go-connections/nat/parse_test.go
 create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go
 create mode 100644 vendor/github.com/docker/go-connections/nat/sort_test.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/README.md
 create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go
 create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go
 create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_test.go
 create mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md
 create mode 100644 vendor/github.com/docker/go-units/LICENSE
 create mode 100644 vendor/github.com/docker/go-units/MAINTAINERS
 create mode 100644 vendor/github.com/docker/go-units/README.md
 create mode 100644 vendor/github.com/docker/go-units/circle.yml
 create mode 100644 vendor/github.com/docker/go-units/duration.go
 create mode 100644 vendor/github.com/docker/go-units/duration_test.go
 create mode 100644 vendor/github.com/docker/go-units/size.go
 create mode 100644 vendor/github.com/docker/go-units/size_test.go
 create mode 100644 vendor/github.com/docker/go-units/ulimit.go
 create mode 100644 vendor/github.com/docker/go-units/ulimit_test.go
 create mode 100644 vendor/github.com/eapache/go-resiliency/.gitignore
 create mode 100644 vendor/github.com/eapache/go-resiliency/.travis.yml
 create mode 100644 vendor/github.com/eapache/go-resiliency/README.md
 create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.gitignore
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/.travis.yml
 create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy_test.go
 create mode 100644 vendor/github.com/eapache/queue/.gitignore
 create mode 100644 vendor/github.com/eapache/queue/.travis.yml
 create mode 100644 vendor/github.com/eapache/queue/queue_test.go
 create mode 100644 vendor/github.com/go-macaron/binding/.gitignore
 create mode 100644 vendor/github.com/go-macaron/binding/.travis.yml
 create mode 100644 vendor/github.com/go-macaron/binding/bind_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/common_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/errorhandler_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/errors_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/file_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/form_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/json_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/misc_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/multipart_test.go
 create mode 100755 vendor/github.com/go-macaron/binding/validate_test.go
 create mode 100644 vendor/github.com/go-macaron/inject/.travis.yml
 create mode 100644 vendor/github.com/go-macaron/inject/inject_test.go
 create mode 100644 vendor/github.com/gocql/gocql/.gitignore
 create mode 100644 vendor/github.com/gocql/gocql/.travis.yml
 create mode 100644 vendor/github.com/gocql/gocql/address_translators_test.go
 create mode 100644 vendor/github.com/gocql/gocql/batch_test.go
 create mode 100644 vendor/github.com/gocql/gocql/cass1batch_test.go
 create mode 100644 vendor/github.com/gocql/gocql/cassandra_test.go
 create mode 100644 vendor/github.com/gocql/gocql/cluster_test.go
 create mode 100644 vendor/github.com/gocql/gocql/common_test.go
 create mode 100644 vendor/github.com/gocql/gocql/compressor_test.go
 create mode 100644 vendor/github.com/gocql/gocql/conn_test.go
 create mode 100644 vendor/github.com/gocql/gocql/control_test.go
 create mode 100644 vendor/github.com/gocql/gocql/errors_test.go
 create mode 100644 vendor/github.com/gocql/gocql/events_ccm_test.go
 create mode 100644 vendor/github.com/gocql/gocql/events_test.go
 create mode 100644 vendor/github.com/gocql/gocql/filters_test.go
 create mode 100644 vendor/github.com/gocql/gocql/frame_test.go
 create mode 100644 vendor/github.com/gocql/gocql/framer_bench_test.go
 create mode 100644 vendor/github.com/gocql/gocql/host_source_test.go
 create mode 100644 vendor/github.com/gocql/gocql/internal/lru/lru_test.go
 create mode 100644 vendor/github.com/gocql/gocql/internal/murmur/murmur_test.go
 create mode 100644 vendor/github.com/gocql/gocql/internal/streams/streams_test.go
 create mode 100644 vendor/github.com/gocql/gocql/marshal_test.go
 create mode 100644 vendor/github.com/gocql/gocql/metadata_test.go
 create mode 100644 vendor/github.com/gocql/gocql/policies_test.go
 create mode 100644 vendor/github.com/gocql/gocql/ring_test.go
 create mode 100644 vendor/github.com/gocql/gocql/session_connect_test.go
 create mode 100644 vendor/github.com/gocql/gocql/session_test.go
 create mode 100644 vendor/github.com/gocql/gocql/stress_test.go
 create mode 100644 vendor/github.com/gocql/gocql/token_test.go
 create mode 100644 vendor/github.com/gocql/gocql/topology_test.go
 create mode 100644 vendor/github.com/gocql/gocql/tuple_test.go
 create mode 100644 vendor/github.com/gocql/gocql/udt_test.go
 create mode 100644 vendor/github.com/gocql/gocql/uuid_test.go
 create mode 100644 vendor/github.com/gocql/gocql/wiki_test.go
 create mode 100644 vendor/github.com/golang/snappy/.gitignore
 create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go
 create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s
 create mode 100644 vendor/github.com/golang/snappy/decode_other.go
 create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go
 create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s
 create mode 100644 vendor/github.com/golang/snappy/encode_other.go
 create mode 100644 vendor/github.com/golang/snappy/golden_test.go
 create mode 100644 vendor/github.com/golang/snappy/snappy_test.go
 create mode 100644 vendor/github.com/google/go-querystring/.gitignore
 create mode 100644 vendor/github.com/google/go-querystring/CONTRIBUTING.md
 create mode 100644 vendor/github.com/google/go-querystring/README.md
 create mode 100644 vendor/github.com/google/go-querystring/query/encode_test.go
 create mode 100644 vendor/github.com/gopherjs/gopherjs/.gitignore
 create mode 100644 vendor/github.com/gopherjs/gopherjs/README.md
 create mode 100644 vendor/github.com/gopherjs/gopherjs/circle.yml
 create mode 100644 vendor/github.com/gopherjs/gopherjs/js/js_test.go
 create mode 100644 vendor/github.com/gopherjs/gopherjs/tool.go
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/.gitattributes
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/.gitignore
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/Dockerfile
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/LICENSE
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/Makefile
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/README.md
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/circle.yml
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/clock/clock.go
 create mode 100755 vendor/github.com/graphite-ng/carbon-relay-ng/deploy-docker.sh
 create mode 100644 vendor/github.com/graphite-ng/carbon-relay-ng/grafana-dashboard.json
 create mode 100644 vendor/github.com/hailocab/go-hostpool/.gitignore
 create mode 100644 vendor/github.com/hailocab/go-hostpool/.travis.yml
 create mode 100644 vendor/github.com/hailocab/go-hostpool/example_test.go
 create mode 100644 vendor/github.com/hailocab/go-hostpool/hostpool_test.go
 create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap_test.go
 create mode 100644 vendor/github.com/hashicorp/go-msgpack/README.md
 create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/bench_test.go
 create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/codecs_test.go
 create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go
 create mode 100644 vendor/github.com/hashicorp/go-msgpack/codec/z_helper_test.go
 create mode 100644 vendor/github.com/hashicorp/go-msgpack/msgpack.org.md
 create mode 100644 vendor/github.com/hashicorp/go-multierror/append_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/format_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror_test.go
 create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/.gitignore
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddr_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifaddrs_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ifattr_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddr_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipaddrs_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv4addr_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/ipv6addr_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/rfc_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/route_info_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddr_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/sockaddrs_test.go
 create mode 100644 vendor/github.com/hashicorp/go-sockaddr/unixsock_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/.gitignore
 create mode 100644 vendor/github.com/hashicorp/memberlist/awareness_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/broadcast_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/integ_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/keyring_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/logging_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/memberlist_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/net_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/queue_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/security_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/state_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/suspicion_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/transport_test.go
 create mode 100644 vendor/github.com/hashicorp/memberlist/util_test.go
 create mode 100644 vendor/github.com/jpillora/backoff/backoff_test.go
 create mode 100644 vendor/github.com/jtolds/gls/context_test.go
 create mode 100644 vendor/github.com/kisielk/og-rek/.gitignore
 create mode 100644 vendor/github.com/kisielk/og-rek/.travis.yml
 create mode 100644 vendor/github.com/kisielk/og-rek/encode_test.go
 create mode 100644 vendor/github.com/kisielk/og-rek/ogorek_test.go
 create mode 100644 vendor/github.com/kisielk/whisper-go/whisper/whisper_test.go
 create mode 100644 vendor/github.com/klauspost/compress/.gitignore
 create mode 100644 vendor/github.com/klauspost/compress/.travis.yml
 create mode 100644 vendor/github.com/klauspost/compress/README.md
 create mode 100644 vendor/github.com/klauspost/compress/flate/asm_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/copy_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/deflate_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/flate_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/inflate_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/reader_test.go
 create mode 100644 vendor/github.com/klauspost/compress/flate/writer_test.go
 create mode 100644 vendor/github.com/klauspost/compress/gzip/example_test.go
 create mode 100644 vendor/github.com/klauspost/compress/gzip/gunzip_test.go
 create mode 100644 vendor/github.com/klauspost/compress/gzip/gzip_test.go
 create mode 100644 vendor/github.com/klauspost/cpuid/.gitignore
 create mode 100644 vendor/github.com/klauspost/cpuid/.travis.yml
 create mode 100644 vendor/github.com/klauspost/cpuid/cpuid_test.go
 create mode 100644 vendor/github.com/klauspost/cpuid/mockcpu_test.go
 create mode 100644 vendor/github.com/klauspost/crc32/.gitignore
 create mode 100644 vendor/github.com/klauspost/crc32/.travis.yml
 create mode 100644 vendor/github.com/klauspost/crc32/crc32_test.go
 create mode 100644 vendor/github.com/klauspost/crc32/example_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/.drone.yml
 create mode 100644 vendor/github.com/mattbaird/elastigo/.gitignore
 create mode 100644 vendor/github.com/mattbaird/elastigo/.gitmodules
 create mode 100644 vendor/github.com/mattbaird/elastigo/.travis.yml
 create mode 100644 vendor/github.com/mattbaird/elastigo/HACKING.md
 create mode 100644 vendor/github.com/mattbaird/elastigo/README.md
 create mode 100644 vendor/github.com/mattbaird/elastigo/Vagrantfile
 create mode 100644 vendor/github.com/mattbaird/elastigo/client.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/doc.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/cataliasinfo_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/catindexinfo_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/catshardinfo_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/connection_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/corebulk_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/coreexample_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/corepercolate_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/coresearch_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/coretest_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/request_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/searchaggregate_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/searchfacet_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/searchfilter_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/searchhighlight_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/searchsearch_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/setup_test.go
 create mode 100644 vendor/github.com/mattbaird/elastigo/lib/shared_test.go
 create mode 100644 vendor/github.com/metrics20/go-metrics20/README.md
 create mode 100644 vendor/github.com/metrics20/go-metrics20/carbon20/manipulate_test.go
 create mode 100644 vendor/github.com/metrics20/go-metrics20/carbon20/validate_test.go
 create mode 100644 vendor/github.com/metrics20/go-metrics20/carbon20/version_test.go
 create mode 100644 vendor/github.com/miekg/dns/.gitignore
 create mode 100644 vendor/github.com/miekg/dns/.travis.yml
 create mode 100644 vendor/github.com/miekg/dns/client_test.go
 create mode 100644 vendor/github.com/miekg/dns/clientconfig_test.go
 create mode 100644 vendor/github.com/miekg/dns/dns_bench_test.go
 create mode 100644 vendor/github.com/miekg/dns/dns_test.go
 create mode 100644 vendor/github.com/miekg/dns/dnssec_test.go
 create mode 100644 vendor/github.com/miekg/dns/dyn_test.go
 create mode 100644 vendor/github.com/miekg/dns/edns_test.go
 create mode 100644 vendor/github.com/miekg/dns/example_test.go
 create mode 100644 vendor/github.com/miekg/dns/fuzz_test.go
 create mode 100644 vendor/github.com/miekg/dns/issue_test.go
 create mode 100644 vendor/github.com/miekg/dns/labels_test.go
 create mode 100644 vendor/github.com/miekg/dns/nsecx_test.go
 create mode 100644 vendor/github.com/miekg/dns/parse_test.go
 create mode 100644 vendor/github.com/miekg/dns/privaterr_test.go
 create mode 100644 vendor/github.com/miekg/dns/remote_test.go
 create mode 100644 vendor/github.com/miekg/dns/sanitize_test.go
 create mode 100644 vendor/github.com/miekg/dns/server_test.go
 create mode 100644 vendor/github.com/miekg/dns/sig0_test.go
 create mode 100644 vendor/github.com/miekg/dns/tsig_test.go
 create mode 100644 vendor/github.com/miekg/dns/types_test.go
 create mode 100644 vendor/github.com/miekg/dns/update_test.go
 create mode 100644 vendor/github.com/miekg/dns/xfr_test.go
 create mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE
 create mode 100644 vendor/github.com/mitchellh/go-homedir/README.md
 create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go
 create mode 100644 vendor/github.com/mitchellh/go-homedir/homedir_test.go
 create mode 100644 vendor/github.com/mreiferson/go-snappystream/.travis.yml
 create mode 100644 vendor/github.com/mreiferson/go-snappystream/fixturedata_test.go
 create mode 100644 vendor/github.com/mreiferson/go-snappystream/reader_test.go
 create mode 100644 vendor/github.com/mreiferson/go-snappystream/readwrite_test.go
 create mode 100644 vendor/github.com/mreiferson/go-snappystream/snappy-go/snappy_test.go
 create mode 100644 vendor/github.com/mreiferson/go-snappystream/writer_test.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/.travis.yml
 create mode 100644 vendor/github.com/nsqio/go-nsq/command_test.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/config_flag_test.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/config_test.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/consumer_test.go
 create mode 100644 vendor/github.com/nsqio/go-nsq/mock_test.go
 create mode 100755 vendor/github.com/nsqio/go-nsq/producer_test.go
 create mode 100644 vendor/github.com/opentracing/opentracing-go/.gitignore
 create mode 100644 vendor/github.com/opentracing/opentracing-go/.travis.yml
 create mode 100644 vendor/github.com/opentracing/opentracing-go/ext/tags_test.go
 create mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext_test.go
 create mode 100644 vendor/github.com/opentracing/opentracing-go/log/field_test.go
 create mode 100644 vendor/github.com/opentracing/opentracing-go/options_test.go
 create mode 100644 vendor/github.com/opentracing/opentracing-go/propagation_test.go
 create mode 100644 vendor/github.com/opentracing/opentracing-go/testtracer_test.go
 create mode 100644 vendor/github.com/philhofer/fwd/reader_test.go
 create mode 100644 vendor/github.com/philhofer/fwd/writer_test.go
 create mode 100644 vendor/github.com/pkg/errors/.gitignore
 create mode 100644 vendor/github.com/pkg/errors/.travis.yml
 create mode 100644 vendor/github.com/pkg/errors/LICENSE
 create mode 100644 vendor/github.com/pkg/errors/README.md
 create mode 100644 vendor/github.com/pkg/errors/appveyor.yml
 create mode 100644 vendor/github.com/pkg/errors/bench_test.go
 create mode 100644 vendor/github.com/pkg/errors/errors.go
 create mode 100644 vendor/github.com/pkg/errors/errors_test.go
 create mode 100644 vendor/github.com/pkg/errors/example_test.go
 create mode 100644 vendor/github.com/pkg/errors/format_test.go
 create mode 100644 vendor/github.com/pkg/errors/stack.go
 create mode 100644 vendor/github.com/pkg/errors/stack_test.go
 create mode 100644 vendor/github.com/raintank/dur/datetime_test.go
 create mode 100644 vendor/github.com/raintank/dur/duration_test.go
 create mode 100644 vendor/github.com/raintank/gziper/.gitignore
 create mode 100644 vendor/github.com/raintank/gziper/gzip_test.go
 create mode 100644 vendor/github.com/raintank/met/LICENSE
 create mode 100644 vendor/github.com/raintank/met/NOTICE
 create mode 100644 vendor/github.com/raintank/met/README.md
 create mode 100644 vendor/github.com/raintank/met/dogstatsd/count.go
 create mode 100644 vendor/github.com/raintank/met/dogstatsd/gauge.go
 create mode 100644 vendor/github.com/raintank/met/dogstatsd/init.go
 create mode 100644 vendor/github.com/raintank/met/dogstatsd/meter.go
 create mode 100644 vendor/github.com/raintank/met/dogstatsd/timer.go
 create mode 100644 vendor/github.com/raintank/met/helper/helper.go
 create mode 100644 vendor/github.com/raintank/met/interfaces.go
 create mode 100644 vendor/github.com/raintank/met/statsd/count.go
 create mode 100644 vendor/github.com/raintank/met/statsd/gauge.go
 create mode 100644 vendor/github.com/raintank/met/statsd/init.go
 create mode 100644 vendor/github.com/raintank/met/statsd/meter.go
 create mode 100644 vendor/github.com/raintank/met/statsd/timer.go
 create mode 100644 vendor/github.com/raintank/worldping-api/.bra.toml
 create mode 100644 vendor/github.com/raintank/worldping-api/.editorconfig
 create mode 100644 vendor/github.com/raintank/worldping-api/.gitignore
 create mode 100644 vendor/github.com/raintank/worldping-api/.gitmodules
 create mode 100644 vendor/github.com/raintank/worldping-api/CHANGELOG.md
 create mode 100644 vendor/github.com/raintank/worldping-api/README.md
 create mode 100644 vendor/github.com/raintank/worldping-api/apiary.apib
 create mode 100644 vendor/github.com/raintank/worldping-api/circle.yml
 create mode 100644 vendor/github.com/raintank/worldping-api/main.go
 create mode 100755 vendor/github.com/raintank/worldping-api/test.sh
 create mode 100644 vendor/github.com/rakyll/globalconf/.travis.yml
 create mode 100644 vendor/github.com/rakyll/globalconf/globalconf_test.go
 create mode 100644 vendor/github.com/rakyll/goini/.gitignore
 create mode 100644 vendor/github.com/rakyll/goini/ini_test.go
 create mode 100644 vendor/github.com/rs/cors/.travis.yml
 create mode 100644 vendor/github.com/rs/cors/bench_test.go
 create mode 100644 vendor/github.com/rs/cors/cors_test.go
 create mode 100644 vendor/github.com/rs/cors/utils_test.go
 create mode 100644 vendor/github.com/rs/xhandler/.travis.yml
 create mode 100644 vendor/github.com/rs/xhandler/chain_example_test.go
 create mode 100644 vendor/github.com/rs/xhandler/chain_test.go
 create mode 100644 vendor/github.com/rs/xhandler/middleware_test.go
 create mode 100644 vendor/github.com/rs/xhandler/xhandler_example_test.go
 create mode 100644 vendor/github.com/rs/xhandler/xhandler_test.go
 create mode 100644 vendor/github.com/sean-/seed/.gitignore
 create mode 100644 vendor/github.com/sean-/seed/init_test.go
 create mode 100644 vendor/github.com/sergi/go-diff/.gitignore
 create mode 100644 vendor/github.com/sergi/go-diff/.travis.yml
 create mode 100644 vendor/github.com/sergi/go-diff/APACHE-LICENSE-2.0
 create mode 100644 vendor/github.com/sergi/go-diff/AUTHORS
 create mode 100644 vendor/github.com/sergi/go-diff/CONTRIBUTORS
 create mode 100644 vendor/github.com/sergi/go-diff/Makefile
 create mode 100644 vendor/github.com/sergi/go-diff/README.md
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/benchutil_test.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff_test.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match_test.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch_test.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/.gitignore
 create mode 100644 vendor/github.com/smartystreets/assertions/.travis.yml
 create mode 100644 vendor/github.com/smartystreets/assertions/collections_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/doc_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/equality_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/Makefile
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/go-render/.travis.yml
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/go-render/README.md
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/go-render/render/render_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/any_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/error_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/not_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/panic_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/quantity_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/serializer_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/strings_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/time_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/type_test.go
 create mode 100644 vendor/github.com/smartystreets/assertions/utilities_for_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/.gitignore
 create mode 100644 vendor/github.com/smartystreets/goconvey/.travis.yml
 create mode 100644 vendor/github.com/smartystreets/goconvey/CONTRIBUTING.md
 create mode 100644 vendor/github.com/smartystreets/goconvey/README.md
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/focused_execution_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/gotest/doc_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/isolated_execution_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/reporting/dot_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/reporting/printer_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/reporting/problems_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/convey/story_conventions_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/dependencies.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/doc_test.go
 create mode 100644 vendor/github.com/smartystreets/goconvey/goconvey.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/.travis.yml
 create mode 100644 vendor/github.com/syndtr/goleveldb/README.md
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/db_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/external_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/key_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go
 create mode 100644 vendor/github.com/syndtr/goleveldb/leveldb/version_test.go
 create mode 100644 vendor/github.com/tinylib/msgp/.gitignore
 create mode 100644 vendor/github.com/tinylib/msgp/.travis.yml
 create mode 100644 vendor/github.com/tinylib/msgp/Makefile
 create mode 100644 vendor/github.com/tinylib/msgp/README.md
 create mode 100644 vendor/github.com/tinylib/msgp/main.go
 create mode 100644 vendor/github.com/tinylib/msgp/msgp/defs_test.go
 create mode 100644
vendor/github.com/tinylib/msgp/msgp/edit_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/extension_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/file_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/floatbench_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/json_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/number_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/raw_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/read_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go create mode 100644 vendor/github.com/tinylib/msgp/msgp/write_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/.gitignore create mode 100644 vendor/github.com/uber/jaeger-client-go/.gitmodules create mode 100644 vendor/github.com/uber/jaeger-client-go/.travis.yml create mode 100644 vendor/github.com/uber/jaeger-client-go/DCO create mode 100644 vendor/github.com/uber/jaeger-client-go/baggage_setter_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/config/config_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/config/example_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/config/options_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/constants_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/context_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/header_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/log/logger_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/logger_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/metrics_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/observer_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/propagation_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/reporter_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/sampler_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/span_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/thrift/.nocover create mode 100644 vendor/github.com/uber/jaeger-client-go/tracer_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/transport_udp_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/utils/http_json_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/utils/rate_limiter_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/utils/utils_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/zipkin_test.go create mode 100644 vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span_test.go 
create mode 100644 vendor/github.com/uber/jaeger-lib/.gitignore create mode 100644 vendor/github.com/uber/jaeger-lib/.travis.yml create mode 100644 vendor/github.com/uber/jaeger-lib/CHANGELOG.md create mode 100644 vendor/github.com/uber/jaeger-lib/CONTRIBUTING.md create mode 100644 vendor/github.com/uber/jaeger-lib/DCO create mode 100644 vendor/github.com/uber/jaeger-lib/Makefile create mode 100644 vendor/github.com/uber/jaeger-lib/README.md create mode 100644 vendor/github.com/uber/jaeger-lib/glide.lock create mode 100644 vendor/github.com/uber/jaeger-lib/glide.yaml create mode 100644 vendor/github.com/uber/jaeger-lib/metrics/local_test.go create mode 100644 vendor/github.com/uber/jaeger-lib/metrics/metrics_test.go create mode 100644 vendor/golang.org/x/net/.gitattributes create mode 100644 vendor/golang.org/x/net/.gitignore create mode 100644 vendor/golang.org/x/net/AUTHORS create mode 100644 vendor/golang.org/x/net/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/net/CONTRIBUTORS create mode 100644 vendor/golang.org/x/net/README create mode 100644 vendor/golang.org/x/net/codereview.cfg create mode 100644 vendor/golang.org/x/net/context/context_test.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go create mode 100644 vendor/golang.org/x/net/context/withtimeout_test.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/per_host_test.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/proxy_test.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/golang.org/x/sys/.gitattributes create mode 100644 vendor/golang.org/x/sys/.gitignore create mode 100644 vendor/golang.org/x/sys/AUTHORS create mode 100644 vendor/golang.org/x/sys/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/sys/CONTRIBUTORS rename vendor/{github.com/codeskyblue/go-uuid => golang.org/x/sys}/LICENSE (96%) create mode 100644 vendor/golang.org/x/sys/PATENTS create mode 100644 vendor/golang.org/x/sys/README.md create mode 100644 vendor/golang.org/x/sys/codereview.cfg create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s create mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 vendor/golang.org/x/sys/windows/dll_windows.go create mode 100644 vendor/golang.org/x/sys/windows/env_unset.go create mode 100644 vendor/golang.org/x/sys/windows/env_windows.go create mode 100644 vendor/golang.org/x/sys/windows/eventlog.go create mode 100644 vendor/golang.org/x/sys/windows/exec_windows.go create mode 100644 vendor/golang.org/x/sys/windows/memory_windows.go create mode 100644 vendor/golang.org/x/sys/windows/mksyscall.go create mode 100644 vendor/golang.org/x/sys/windows/race.go create mode 100644 vendor/golang.org/x/sys/windows/race0.go create mode 100644 vendor/golang.org/x/sys/windows/security_windows.go create mode 100644 vendor/golang.org/x/sys/windows/service.go create mode 100644 vendor/golang.org/x/sys/windows/str.go create mode 100644 vendor/golang.org/x/sys/windows/syscall.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_test.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows.go create mode 100644 vendor/golang.org/x/sys/windows/syscall_windows_test.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows.go create mode 100644 
vendor/golang.org/x/sys/windows/types_windows_386.go create mode 100644 vendor/golang.org/x/sys/windows/types_windows_amd64.go create mode 100644 vendor/golang.org/x/sys/windows/zsyscall_windows.go create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/.travis.yml create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/LICENSE create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/README.md create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/doc.go create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/examples_test.go create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/statsd.go create mode 100644 vendor/gopkg.in/alexcesaro/statsd.v1/statsd_test.go create mode 100644 vendor/gopkg.in/inf.v0/benchmark_test.go create mode 100644 vendor/gopkg.in/inf.v0/dec_go1_2_test.go create mode 100644 vendor/gopkg.in/inf.v0/dec_internal_test.go create mode 100644 vendor/gopkg.in/inf.v0/dec_test.go create mode 100644 vendor/gopkg.in/inf.v0/example_test.go create mode 100644 vendor/gopkg.in/inf.v0/rounder_example_test.go create mode 100644 vendor/gopkg.in/inf.v0/rounder_test.go create mode 100644 vendor/gopkg.in/ini.v1/.gitignore create mode 100644 vendor/gopkg.in/ini.v1/.travis.yml create mode 100644 vendor/gopkg.in/ini.v1/ini_test.go create mode 100644 vendor/gopkg.in/ini.v1/key_test.go create mode 100644 vendor/gopkg.in/ini.v1/section_test.go create mode 100644 vendor/gopkg.in/ini.v1/struct_test.go create mode 100644 vendor/gopkg.in/macaron.v1/.gitignore create mode 100644 vendor/gopkg.in/macaron.v1/.travis.yml create mode 100644 vendor/gopkg.in/macaron.v1/context_test.go create mode 100644 vendor/gopkg.in/macaron.v1/logger_test.go create mode 100644 vendor/gopkg.in/macaron.v1/macaron_test.go create mode 100644 vendor/gopkg.in/macaron.v1/recovery_test.go create mode 100644 vendor/gopkg.in/macaron.v1/render_test.go create mode 100644 vendor/gopkg.in/macaron.v1/response_writer_test.go create mode 100644 vendor/gopkg.in/macaron.v1/return_handler_test.go create mode 100644 vendor/gopkg.in/macaron.v1/router_test.go create mode 100644 vendor/gopkg.in/macaron.v1/static_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v0/event_gen_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v0/metric_gen_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v0/metric_serialization_bench_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v0/reslice_test.go delete mode 100644 vendor/gopkg.in/raintank/schema.v1/event.go delete mode 100644 vendor/gopkg.in/raintank/schema.v1/event_gen.go create mode 100644 vendor/gopkg.in/raintank/schema.v1/metric_gen_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v1/metric_serialization_bench_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v1/metric_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v1/point_gen_test.go create mode 100644 vendor/gopkg.in/raintank/schema.v1/reslice_test.go delete mode 100644 vendor/vendor.json diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000000..88a3f3b1ed --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,454 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + name = "github.com/DataDog/datadog-go" + packages = ["statsd"] + revision = "0ddda6bee21174ef6c4873647cb0d6ec9cba996f" + version = "1.1.0" + +[[projects]] + branch = "master" + name = "github.com/Dieterbe/artisanalhistogram" + packages = ["hist12h","hist15s"] + revision = "f61b7225d304620f6f2c5cbb21435d0957115429" + +[[projects]] + name = "github.com/Dieterbe/profiletrigger" + packages = ["heap"] + revision = "d90c4b0cfeed756381675e85cc6e6b8a02cb01a6" + +[[projects]] + name = "github.com/Microsoft/go-winio" + packages = ["."] + revision = "78439966b38d69bf38227fbf57ac8a6fee70f69a" + version = "v0.4.5" + +[[projects]] + name = "github.com/Shopify/sarama" + packages = ["."] + revision = "bd61cae2be85fa6ff40eb23dcdd24567967ac2ae" + version = "v1.10.1" + +[[projects]] + name = "github.com/Sirupsen/logrus" + packages = ["."] + revision = "55eb11d21d2a31a3cc93838241d04800f52e823d" + version = "v0.7.3" + +[[projects]] + name = "github.com/Unknwon/com" + packages = ["."] + revision = "28b053d5a2923b87ce8c5a08f3af779894a72758" + version = "v1" + +[[projects]] + branch = "master" + name = "github.com/alyu/configparser" + packages = ["."] + revision = "26b2fe18bee125de2a3090d6fadb7e280e63eba6" + +[[projects]] + name = "github.com/apache/thrift" + packages = ["lib/go/thrift"] + revision = "b2a4d4ae21c789b689dd162deb819665567f481c" + version = "0.10.0" + +[[projects]] + name = "github.com/araddon/gou" + packages = ["."] + revision = "cf9cf25f52be174c5878920a8021bd224cbe32c7" + +[[projects]] + name = "github.com/armon/go-metrics" + packages = ["."] + revision = "06b60999766278efd6d2b5d8418a58c3d5b99e87" + +[[projects]] + name = "github.com/bitly/go-hostpool" + packages = ["."] + revision = "d0e59c22a56e8dadfed24f74f452cea5a52722d2" + +[[projects]] + name = "github.com/bsm/sarama-cluster" + packages = ["."] + revision = "11887f57ba85b075757463e9a4ffcfb0851ddff3" + +[[projects]] + branch = "master" + name = "github.com/codahale/hdrhistogram" + packages = ["."] + revision = "3a0bb77429bd3a61596f5e8a3172445844342120" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + name = "github.com/dgryski/go-bits" + packages = ["."] + revision = "2c7641e7dfe3945a0fe755f58c85ab306624956d" + +[[projects]] + name = "github.com/dgryski/go-linlog" + packages = ["."] + revision = "f18bb8a4e7bcd60fd4fb99f3e8752f5da20f70a2" + +[[projects]] + name = "github.com/dgryski/go-tsz" + packages = [".","testdata"] + revision = "5f4c484a9e838989c201ff437b3cab59cfdc46c8" + +[[projects]] + name = "github.com/docker/distribution" + packages = ["digest","reference"] + revision = "48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89" + version = "v2.6.2" + +[[projects]] + name = "github.com/docker/docker" + packages = ["api/types","api/types/blkiodev","api/types/container","api/types/events","api/types/filters","api/types/mount","api/types/network","api/types/reference","api/types/registry","api/types/strslice","api/types/swarm","api/types/time","api/types/versions","api/types/volume","client","pkg/tlsconfig"] + revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" + version = "v1.13.1" + +[[projects]] + name = "github.com/docker/go-connections" + packages = ["nat","sockets","tlsconfig"] + revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d" + version = "v0.3.0" + +[[projects]] + name = "github.com/docker/go-units" + packages = ["."] + revision = "0dadbb0345b35ec7ef35e228dabb8de89a65bf52" + version = "v0.3.2" + 
+[[projects]] + name = "github.com/eapache/go-resiliency" + packages = ["breaker"] + revision = "b86b1ec0dd4209a588dc1285cdd471e73525c0b3" + +[[projects]] + branch = "master" + name = "github.com/eapache/go-xerial-snappy" + packages = ["."] + revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c" + +[[projects]] + name = "github.com/eapache/queue" + packages = ["."] + revision = "ded5959c0d4e360646dc9e9908cff48666781367" + version = "v1.0.2" + +[[projects]] + name = "github.com/go-macaron/binding" + packages = ["."] + revision = "a453235199f8898e01647db8820f937a184fbd09" + +[[projects]] + branch = "master" + name = "github.com/go-macaron/inject" + packages = ["."] + revision = "d8a0b8677191f4380287cfebd08e462217bac7ad" + +[[projects]] + name = "github.com/gocql/gocql" + packages = [".","internal/lru","internal/murmur","internal/streams"] + revision = "066e974c166d59aa2d3aee45b234d8c21c631180" + +[[projects]] + branch = "master" + name = "github.com/golang/snappy" + packages = ["."] + revision = "553a641470496b2327abcac10b36396bd98e45c9" + +[[projects]] + branch = "master" + name = "github.com/google/go-querystring" + packages = ["query"] + revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" + +[[projects]] + name = "github.com/gopherjs/gopherjs" + packages = ["js"] + revision = "4b53e1bddba0e2f734514aeb6c02db652f4c6fe8" + +[[projects]] + name = "github.com/graphite-ng/carbon-relay-ng" + packages = ["clock"] + revision = "5042de59dc34938f7f24e8f0c2452fa10ae30378" + version = "v0.9.3" + +[[projects]] + branch = "master" + name = "github.com/hailocab/go-hostpool" + packages = ["."] + revision = "e80d13ce29ede4452c43dea11e79b9bc8a15b478" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/errwrap" + packages = ["."] + revision = "7554cd9344cec97297fa6649b055a8c98c2a1e55" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-msgpack" + packages = ["codec"] + revision = "fa3f63826f7c23912c15263591e65d54d080b458" + +[[projects]] + name = "github.com/hashicorp/go-multierror" + packages = ["."] + revision = "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-sockaddr" + packages = ["."] + revision = "9b4c5fa5b10a683339a270d664474b9f4aee62fc" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/memberlist" + packages = ["."] + revision = "9bdd37bfb26bd039c08b0f36be6f80ceede4aaf3" + +[[projects]] + name = "github.com/jpillora/backoff" + packages = ["."] + revision = "06c7a16c845dc8e0bf575fafeeca0f5462f5eb4d" + +[[projects]] + name = "github.com/jtolds/gls" + packages = ["."] + revision = "8ddce2a84170772b95dd5d576c48d517b22cac63" + version = "v4.2.0" + +[[projects]] + name = "github.com/kisielk/og-rek" + packages = ["."] + revision = "ec792bc6e6aa06a6c490e8d292e15cca173c8bd3" + +[[projects]] + branch = "master" + name = "github.com/kisielk/whisper-go" + packages = ["whisper"] + revision = "82e8091afdea241119c34a452fe24fcc2a0b962e" + +[[projects]] + name = "github.com/klauspost/compress" + packages = ["flate","gzip"] + revision = "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6" + +[[projects]] + name = "github.com/klauspost/cpuid" + packages = ["."] + revision = "09cded8978dc9e80714c4d85b0322337b0a1e5e0" + version = "v1.0" + +[[projects]] + name = "github.com/klauspost/crc32" + packages = ["."] + revision = "6834731faf32e62a2dd809d99fb24d1e4ae5a92d" + +[[projects]] + name = "github.com/mattbaird/elastigo" + packages = ["lib"] + revision = "34c4c4d8425cbdcbc8e257943a2044d5e9f7dab5" + +[[projects]] + 
branch = "master" + name = "github.com/metrics20/go-metrics20" + packages = ["carbon20"] + revision = "27c134d83f76d8d6d233e52c4d628e2ee2634d75" + +[[projects]] + name = "github.com/miekg/dns" + packages = ["."] + revision = "48c8acaf0c2dc19fbb4f1b2776c1cee4e6f65aa0" + +[[projects]] + branch = "master" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" + +[[projects]] + name = "github.com/mreiferson/go-snappystream" + packages = [".","snappy-go"] + revision = "028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504" + version = "v0.2.3" + +[[projects]] + name = "github.com/nsqio/go-nsq" + packages = ["."] + revision = "642a3f9935f12cb3b747294318d730f56f4c34b4" + +[[projects]] + name = "github.com/opentracing/opentracing-go" + packages = [".","ext","log"] + revision = "8ebe5d4e236eed9fd88e593c288bfb804d630b8c" + +[[projects]] + name = "github.com/philhofer/fwd" + packages = ["."] + revision = "92647f2bd94a89b170c19e96e6456dd64ac37e1a" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + branch = "v2" + name = "github.com/raintank/dur" + packages = ["."] + revision = "6ce9ec78e3a2ee918588104869369e5527aefe88" + +[[projects]] + branch = "master" + name = "github.com/raintank/gziper" + packages = ["."] + revision = "ad70bdb176fa32c61c29d21cd053bdfd9f0ec329" + +[[projects]] + branch = "master" + name = "github.com/raintank/met" + packages = [".","dogstatsd","helper","statsd"] + revision = "05a94bb32ad1f23f4b01edb2edd06862d4a484d2" + +[[projects]] + branch = "master" + name = "github.com/raintank/misc" + packages = ["app"] + revision = "cb73203311ae2489b845469f622678f24ff98a16" + +[[projects]] + name = "github.com/raintank/worldping-api" + packages = ["pkg/log"] + revision = "66b28f1160d1e0d7d236b5bd96a5d8eee627357c" + +[[projects]] + name = "github.com/rakyll/globalconf" + packages = ["."] + revision = "415abc325023f1a00cd2d9fa512e0e71745791a2" + +[[projects]] + branch = "master" + name = "github.com/rakyll/goini" + packages = ["."] + revision = "907cca0f578a5316fb864ec6992dc3d9730ec58c" + +[[projects]] + name = "github.com/rs/cors" + packages = ["."] + revision = "a62a804a8a009876ca59105f7899938a1349f4b3" + version = "v1.0" + +[[projects]] + name = "github.com/rs/xhandler" + packages = ["."] + revision = "ed27b6fd65218132ee50cd95f38474a3d8a2cd12" + version = "v.11" + +[[projects]] + branch = "master" + name = "github.com/sean-/seed" + packages = ["."] + revision = "e2103e2c35297fb7e17febb81e49b312087a2372" + +[[projects]] + name = "github.com/sergi/go-diff" + packages = ["diffmatchpatch"] + revision = "feef008d51ad2b3778f85d387ccf91735543008d" + +[[projects]] + name = "github.com/smartystreets/assertions" + packages = [".","internal/go-render/render","internal/oglematchers"] + revision = "443d812296a84445c202c085f19e18fc238f8250" + +[[projects]] + name = "github.com/smartystreets/goconvey" + packages = ["convey","convey/gotest","convey/reporting"] + revision = "995f5b2e021c69b8b028ba6d0b05c1dd500783db" + +[[projects]] + name = "github.com/syndtr/goleveldb" + packages = ["leveldb","leveldb/cache","leveldb/comparer","leveldb/errors","leveldb/filter","leveldb/iterator","leveldb/journal","leveldb/memdb","leveldb/opt","leveldb/storage","leveldb/table","leveldb/util"] + revision = "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d" + +[[projects]] + name = "github.com/tinylib/msgp" + packages = ["msgp"] + revision = 
"0cea1fa86e8403be1284013014f87ab942056de8" + version = "v1.0-beta" + +[[projects]] + name = "github.com/uber/jaeger-client-go" + packages = [".","config","internal/baggage","internal/baggage/remote","internal/spanlog","log","rpcmetrics","thrift-gen/agent","thrift-gen/baggage","thrift-gen/jaeger","thrift-gen/sampling","thrift-gen/zipkincore","utils"] + revision = "ff3efa227b65e419701a4f48985379ca106a89e7" + version = "v2.11.0" + +[[projects]] + name = "github.com/uber/jaeger-lib" + packages = ["metrics"] + revision = "c48167d9cae5887393dd5e61efd06a4a48b7fbb3" + version = "v1.2.1" + +[[projects]] + name = "golang.org/x/net" + packages = ["context","context/ctxhttp","proxy"] + revision = "1aafd77e1e7f6849ad16a7bdeb65e3589a10b2bb" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["windows"] + revision = "8b4580aae2a0dd0c231a45d3ccb8434ff533b840" + +[[projects]] + branch = "v1" + name = "gopkg.in/alexcesaro/statsd.v1" + packages = ["."] + revision = "c289775e46fd4576434c2ac3f3298ebe1f0a6ab7" + +[[projects]] + name = "gopkg.in/inf.v0" + packages = ["."] + revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" + version = "v0.9.0" + +[[projects]] + name = "gopkg.in/ini.v1" + packages = ["."] + revision = "6e4869b434bd001f6983749881c7ead3545887d8" + version = "v1.21.1" + +[[projects]] + name = "gopkg.in/macaron.v1" + packages = ["."] + revision = "4974334b10dbb6f5c0e17f4c10555ff050a16329" + version = "v1.1.8" + +[[projects]] + name = "gopkg.in/raintank/schema.v0" + packages = ["."] + revision = "b5eb018b887ddff47b9152f28f6f1513be9b2141" + version = "v0.0.1" + +[[projects]] + name = "gopkg.in/raintank/schema.v1" + packages = [".","msg"] + revision = "c068e0c9c054af19c858b2d947741180c09ea6b1" + version = "v1.6" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "2fe2c355b530fd45d921c7f013797dc6f0006345642e7c89d4b640b7ed603d89" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000000..c77a911bc0 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,164 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/Dieterbe/artisanalhistogram" + +[[constraint]] + name = "github.com/Dieterbe/profiletrigger" + +[[constraint]] + name = "github.com/Shopify/sarama" + version = "1.10.1" + +[[constraint]] + name = "github.com/Sirupsen/logrus" + version = "0.7.3" + +[[constraint]] + branch = "master" + name = "github.com/alyu/configparser" + +[[constraint]] + name = "github.com/bitly/go-hostpool" + +[[constraint]] + name = "github.com/bsm/sarama-cluster" + +[[constraint]] + name = "github.com/davecgh/go-spew" + version = "1.1.0" + +[[constraint]] + name = "github.com/dgryski/go-linlog" + +[[constraint]] + name = "github.com/dgryski/go-tsz" + +[[constraint]] + name = "github.com/docker/docker" + +[[constraint]] + name = "github.com/go-macaron/binding" + +[[constraint]] + name = "github.com/gocql/gocql" + +[[constraint]] + branch = "master" + name = "github.com/golang/snappy" + +[[constraint]] + branch = "master" + name = "github.com/google/go-querystring" + +[[constraint]] + name = "github.com/graphite-ng/carbon-relay-ng" + version = "0.9.3" + +[[constraint]] + branch = "master" + name = "github.com/hailocab/go-hostpool" + +[[constraint]] + branch = "master" + name = "github.com/hashicorp/memberlist" + +[[constraint]] + name = "github.com/jpillora/backoff" + +[[constraint]] + name = "github.com/kisielk/og-rek" + +[[constraint]] + branch = "master" + name = "github.com/kisielk/whisper-go" + +[[constraint]] + name = "github.com/mattbaird/elastigo" + +[[constraint]] + branch = "master" + name = "github.com/metrics20/go-metrics20" + +[[constraint]] + branch = "master" + name = "github.com/mitchellh/go-homedir" + +[[constraint]] + name = "github.com/nsqio/go-nsq" + +[[constraint]] + name = "github.com/opentracing/opentracing-go" + +[[constraint]] + branch = "v2" + name = "github.com/raintank/dur" + +[[constraint]] + branch = "master" + name = "github.com/raintank/gziper" + +[[constraint]] + branch = "master" + name = "github.com/raintank/met" + +[[constraint]] + branch = "master" + name = "github.com/raintank/misc" + +[[constraint]] + name = "github.com/raintank/worldping-api" + +[[constraint]] + name = "github.com/rakyll/globalconf" + +[[constraint]] + name = "github.com/rs/cors" + version = "1.0.0" + +[[constraint]] + name = "github.com/sergi/go-diff" + +[[constraint]] + name = "github.com/smartystreets/goconvey" + +[[constraint]] + name = "github.com/syndtr/goleveldb" + +[[constraint]] + name = "github.com/tinylib/msgp" + version = "1.0.0-beta" + +[[constraint]] + name = "github.com/uber/jaeger-client-go" + +[[constraint]] + name = "gopkg.in/macaron.v1" + version = "1.1.8" + +[[constraint]] + name = "gopkg.in/raintank/schema.v0" + version = "0.0.1" + +[[constraint]] + name = "gopkg.in/raintank/schema.v1" diff --git a/docs/development.md b/docs/development.md index a029f76e37..5e4c5d18d7 100644 --- a/docs/development.md +++ b/docs/development.md @@ -1,5 +1,5 @@ # Development -* [govendor](https://github.com/kardianos/govendor) for managing vendored depedencies +* [dep](https://github.com/golang/dep) for managing vendored dependencies +* `go build` to build +* 
[metrics2docs](https://github.com/Dieterbe/metrics2docs) generates the metrics documentation for the [metrics page](https://github.com/grafana/metrictank/blob/master/docs/metrics.md) diff --git a/scripts/vendor_health.sh b/scripts/vendor_health.sh index 596a003261..4b8699bcb1 100755 --- a/scripts/vendor_health.sh +++ b/scripts/vendor_health.sh @@ -1,20 +1,19 @@ #!/bin/bash -if ! which govendor >/dev/null; then - go get github.com/kardianos/govendor || exit 1 +if ! which dep >/dev/null; then + go get -u github.com/golang/dep/cmd/dep || exit 1 fi -ret=0 +dep version -external=$(govendor list +external) -missing=$(govendor list +missing) -unused=$(govendor list +unused) +# until we have https://docs.google.com/document/d/1j_Hka8eFKqWwGJWFSFedtBsNkFaRN3yvL4g8k30PLmg/edit# +# this should do fine: +# (note dep ensure -dry-run and dep ensure would add a whole bunch of packages to vendor, which dep prune deletes again, so we can't just check those) +# we can expect this to change soon though: https://github.com/golang/dep/issues/944 -[ -n "$external" ] && ret=1 && echo -e "packages missing in vendor that are in gopath:\n$external\n" -[ -n "$missing" ] && ret=1 && echo -e "packages missing in vendor that are not found:\n$missing\n" -[ -n "$unused" ] && ret=1 && echo -e "unused vendored packages found:\n$unused\n" +dep ensure -no-vendor -dry-run +ret=$? -echo govendor list: && govendor list -echo -n "govendor version: " && govendor -version +dep status exit $ret diff --git a/vendor/github.com/DataDog/datadog-go/.travis.yml b/vendor/github.com/DataDog/datadog-go/.travis.yml new file mode 100644 index 0000000000..a162f86790 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.4 + - 1.5 + +script: + - go test -v ./... diff --git a/vendor/github.com/DataDog/datadog-go/CHANGELOG.md b/vendor/github.com/DataDog/datadog-go/CHANGELOG.md new file mode 100644 index 0000000000..f15422a38a --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/CHANGELOG.md @@ -0,0 +1,54 @@ +Changes +======= + +# 1.1.0 / Unreleased + +### Notes + +* [FEATURE] Export serviceCheckStatus allowing interfaces to statsd.Client. See [#19][] (Thanks [@Jasrags][]) +* [FEATURE] Client.sendMsg(). Check payload length for all messages. See [#25][] (Thanks [@theckman][]) +* [BUGFIX] Remove new lines from tags. See [#21][] (Thanks [@sjung-stripe][]) +* [BUGFIX] Do not panic on Client.Event when `nil`. See [#28][] +* [DOCUMENTATION] Update `decr` documentation to match implementation. See [#30][] (Thanks [@kcollasarundell][]) + + +# 1.0.0 / 2016-08-22 + +### Details +We hadn't been properly versioning this project. We will begin to do so with this +`1.0.0` release. We had some contributions in the past and would like to thank the +contributors [@aviau][], [@sschepens][], [@jovanbrakus][], [@abtris][], [@tummychow][], [@gphat][], [@diasjorge][], +[@victortrac][], [@seiffert][] and [@w-vi][], in no particular order, for their work. + +Below, for reference, the latest improvements made in 07/2016 - 08/2016 + +### Notes + +* [FEATURE] Implemented support for service checks. See [#17][] and [#5][]. (Thanks [@jovanbrakus][] and [@diasjorge][]). +* [FEATURE] Add Incr, Decr, Timing and more docs.. See [#15][]. (Thanks [@gphat][]) +* [BUGFIX] Do not append to shared slice. See [#16][]. 
(Thanks [@tummychow][]) + + +[#5]: https://github.com/DataDog/datadog-go/issues/5 +[#15]: https://github.com/DataDog/datadog-go/issues/15 +[#16]: https://github.com/DataDog/datadog-go/issues/16 +[#17]: https://github.com/DataDog/datadog-go/issues/17 +[#19]: https://github.com/DataDog/datadog-go/issues/19 +[#21]: https://github.com/DataDog/datadog-go/issues/21 +[#25]: https://github.com/DataDog/datadog-go/issues/25 +[#28]: https://github.com/DataDog/datadog-go/issues/28 +[#30]: https://github.com/DataDog/datadog-go/issues/30 +[@Jasrags]: https://github.com/Jasrags +[@abtris]: https://github.com/abtris +[@aviau]: https://github.com/aviau +[@diasjorge]: https://github.com/diasjorge +[@gphat]: https://github.com/gphat +[@jovanbrakus]: https://github.com/jovanbrakus +[@kcollasarundell]: https://github.com/kcollasarundell +[@seiffert]: https://github.com/seiffert +[@sjung-stripe]: https://github.com/sjung-stripe +[@sschepens]: https://github.com/sschepens +[@theckman]: https://github.com/theckman +[@tummychow]: https://github.com/tummychow +[@victortrac]: https://github.com/victortrac +[@w-vi]: https://github.com/w-vi \ No newline at end of file diff --git a/vendor/github.com/DataDog/datadog-go/LICENSE.txt b/vendor/github.com/DataDog/datadog-go/LICENSE.txt new file mode 100644 index 0000000000..97cd06d7fb --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2015 Datadog, Inc + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/DataDog/datadog-go/README.md b/vendor/github.com/DataDog/datadog-go/README.md new file mode 100644 index 0000000000..5ab61ede43 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/README.md @@ -0,0 +1,33 @@ +[![Build Status](https://travis-ci.org/DataDog/datadog-go.svg?branch=master)](https://travis-ci.org/DataDog/datadog-go) +# Overview + +Packages in `datadog-go` provide Go clients for various APIs at [DataDog](http://datadoghq.com). 
+ +## Statsd + +[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/DataDog/datadog-go/statsd) +[![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](http://opensource.org/licenses/MIT) + +The [statsd](https://github.com/DataDog/datadog-go/tree/master/statsd) package provides a client for +[dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/): + +```go +import "github.com/DataDog/datadog-go/statsd" + +func main() { + c, err := statsd.New("127.0.0.1:8125") + if err != nil { + log.Fatal(err) + } + // prefix every metric with the app name + c.Namespace = "flubber." + // send the EC2 availability zone as a tag with every metric + c.Tags = append(c.Tags, "us-east-1a") + err = c.Gauge("request.duration", 1.2, nil, 1) + // ... +} +``` + +## License + +All code distributed under the [MIT License](http://opensource.org/licenses/MIT) unless otherwise specified. diff --git a/vendor/github.com/DataDog/datadog-go/statsd/README.md b/vendor/github.com/DataDog/datadog-go/statsd/README.md new file mode 100644 index 0000000000..2e89777633 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/README.md @@ -0,0 +1,52 @@ +## Overview + +Package `statsd` provides a Go [dogstatsd](http://docs.datadoghq.com/guides/dogstatsd/) client. Dogstatsd extends Statsd, adding tags +and histograms. + +## Get the code + + $ go get github.com/DataDog/datadog-go/statsd + +## Usage + +```go +// Create the client +c, err := statsd.New("127.0.0.1:8125") +if err != nil { + log.Fatal(err) +} +// Prefix every metric with the app name +c.Namespace = "flubber." +// Send the EC2 availability zone as a tag with every metric +c.Tags = append(c.Tags, "us-east-1a") + +// Do some metrics! +err = c.Gauge("request.queue_depth", 12, nil, 1) +err = c.Timing("request.duration", duration, nil, 1) // Uses a time.Duration! +err = c.TimeInMilliseconds("request", 12, nil, 1) +err = c.Incr("request.count_total", nil, 1) +err = c.Decr("request.count_total", nil, 1) +err = c.Count("request.count_total", 2, nil, 1) +``` + +## Buffering Client + +DogStatsD accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec. + +## Development + +Run the tests with: + + $ go test + +## Documentation + +Please see: http://godoc.org/github.com/DataDog/datadog-go/statsd + +## License + +go-dogstatsd is released under the [MIT license](http://www.opensource.org/licenses/mit-license.php). + +## Credits + +Original code by [ooyala](https://github.com/ooyala/go-dogstatsd). diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go new file mode 100644 index 0000000000..b8cf430af8 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/statsd.go @@ -0,0 +1,580 @@ +// Copyright 2013 Ooyala, Inc. + +/* +Package statsd provides a Go dogstatsd client. Dogstatsd extends the popular statsd, +adding tags and histograms and pushing upstream to Datadog. + +Refer to http://docs.datadoghq.com/guides/dogstatsd/ for information about DogStatsD. + +Example Usage: + + // Create the client + c, err := statsd.New("127.0.0.1:8125") + if err != nil { + log.Fatal(err) + } + // Prefix every metric with the app name + c.Namespace = "flubber." 
+ // Send the EC2 availability zone as a tag with every metric + c.Tags = append(c.Tags, "us-east-1a") + err = c.Gauge("request.duration", 1.2, nil, 1) + +statsd is based on go-statsd-client. +*/ +package statsd + +import ( + "bytes" + "errors" + "fmt" + "io" + "math/rand" + "net" + "strconv" + "strings" + "sync" + "time" +) + +/* +OptimalPayloadSize defines the optimal payload size for a UDP datagram, 1432 bytes +is optimal for regular networks with an MTU of 1500 so datagrams don't get +fragmented. It's generally recommended not to fragment UDP datagrams as losing +a single fragment will cause the entire datagram to be lost. + +This can be increased if your network has a greater MTU or you don't mind UDP +datagrams getting fragmented. The practical limit is MaxUDPPayloadSize +*/ +const OptimalPayloadSize = 1432 + +/* +MaxUDPPayloadSize defines the maximum payload size for a UDP datagram. +Its value comes from the calculation: 65535 bytes Max UDP datagram size - +8byte UDP header - 60byte max IP headers +any number greater than that will see frames being cut out. +*/ +const MaxUDPPayloadSize = 65467 + +// A Client is a handle for sending udp messages to dogstatsd. It is safe to +// use one Client from multiple goroutines simultaneously. +type Client struct { + conn net.Conn + // Namespace to prepend to all statsd calls + Namespace string + // Tags are global tags to be added to every statsd call + Tags []string + // BufferLength is the length of the buffer in commands. + bufferLength int + flushTime time.Duration + commands []string + buffer bytes.Buffer + stop bool + sync.Mutex +} + +// New returns a pointer to a new Client given an addr in the format "hostname:port". +func New(addr string) (*Client, error) { + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + return nil, err + } + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + return nil, err + } + client := &Client{conn: conn} + return client, nil +} + +// NewBuffered returns a Client that buffers its output and sends it in chunks. +// Buflen is the length of the buffer in number of commands. +func NewBuffered(addr string, buflen int) (*Client, error) { + client, err := New(addr) + if err != nil { + return nil, err + } + client.bufferLength = buflen + client.commands = make([]string, 0, buflen) + client.flushTime = time.Millisecond * 100 + go client.watch() + return client, nil +} + +// format a message from its name, value, tags and rate. Also adds global +// namespace and tags. 
+func (c *Client) format(name, value string, tags []string, rate float64) string { + var buf bytes.Buffer + if c.Namespace != "" { + buf.WriteString(c.Namespace) + } + buf.WriteString(name) + buf.WriteString(":") + buf.WriteString(value) + if rate < 1 { + buf.WriteString(`|@`) + buf.WriteString(strconv.FormatFloat(rate, 'f', -1, 64)) + } + + writeTagString(&buf, c.Tags, tags) + + return buf.String() +} + +func (c *Client) watch() { + for _ = range time.Tick(c.flushTime) { + if c.stop { + return + } + c.Lock() + if len(c.commands) > 0 { + // FIXME: eating error here + c.flush() + } + c.Unlock() + } +} + +func (c *Client) append(cmd string) error { + c.Lock() + defer c.Unlock() + c.commands = append(c.commands, cmd) + // if we should flush, lets do it + if len(c.commands) == c.bufferLength { + if err := c.flush(); err != nil { + return err + } + } + return nil +} + +func (c *Client) joinMaxSize(cmds []string, sep string, maxSize int) ([][]byte, []int) { + c.buffer.Reset() //clear buffer + + var frames [][]byte + var ncmds []int + sepBytes := []byte(sep) + sepLen := len(sep) + + elem := 0 + for _, cmd := range cmds { + needed := len(cmd) + + if elem != 0 { + needed = needed + sepLen + } + + if c.buffer.Len()+needed <= maxSize { + if elem != 0 { + c.buffer.Write(sepBytes) + } + c.buffer.WriteString(cmd) + elem++ + } else { + frames = append(frames, copyAndResetBuffer(&c.buffer)) + ncmds = append(ncmds, elem) + // if cmd is bigger than maxSize it will get flushed on next loop + c.buffer.WriteString(cmd) + elem = 1 + } + } + + //add whatever is left! if there's actually something + if c.buffer.Len() > 0 { + frames = append(frames, copyAndResetBuffer(&c.buffer)) + ncmds = append(ncmds, elem) + } + + return frames, ncmds +} + +func copyAndResetBuffer(buf *bytes.Buffer) []byte { + tmpBuf := make([]byte, buf.Len()) + copy(tmpBuf, buf.Bytes()) + buf.Reset() + return tmpBuf +} + +// flush the commands in the buffer. Lock must be held by caller. +func (c *Client) flush() error { + frames, flushable := c.joinMaxSize(c.commands, "\n", OptimalPayloadSize) + var err error + cmdsFlushed := 0 + for i, data := range frames { + _, e := c.conn.Write(data) + if e != nil { + err = e + break + } + cmdsFlushed += flushable[i] + } + + // clear the slice with a slice op, doesn't realloc + if cmdsFlushed == len(c.commands) { + c.commands = c.commands[:0] + } else { + //this case will cause a future realloc... + // drop problematic command though (sorry). + c.commands = c.commands[cmdsFlushed+1:] + } + return err +} + +func (c *Client) sendMsg(msg string) error { + // return an error if message is bigger than MaxUDPPayloadSize + if len(msg) > MaxUDPPayloadSize { + return errors.New("message size exceeds MaxUDPPayloadSize") + } + + // if this client is buffered, then we'll just append this + if c.bufferLength > 0 { + return c.append(msg) + } + + _, err := c.conn.Write([]byte(msg)) + return err +} + +// send handles sampling and sends the message over UDP. It also adds global namespace prefixes and tags. +func (c *Client) send(name, value string, tags []string, rate float64) error { + if c == nil { + return nil + } + if rate < 1 && rand.Float64() > rate { + return nil + } + data := c.format(name, value, tags, rate) + return c.sendMsg(data) +} + +// Gauge measures the value of a metric at a particular time. 
+func (c *Client) Gauge(name string, value float64, tags []string, rate float64) error { + stat := fmt.Sprintf("%f|g", value) + return c.send(name, stat, tags, rate) +} + +// Count tracks how many times something happened per second. +func (c *Client) Count(name string, value int64, tags []string, rate float64) error { + stat := fmt.Sprintf("%d|c", value) + return c.send(name, stat, tags, rate) +} + +// Histogram tracks the statistical distribution of a set of values. +func (c *Client) Histogram(name string, value float64, tags []string, rate float64) error { + stat := fmt.Sprintf("%f|h", value) + return c.send(name, stat, tags, rate) +} + +// Decr is just Count of -1 +func (c *Client) Decr(name string, tags []string, rate float64) error { + return c.send(name, "-1|c", tags, rate) +} + +// Incr is just Count of 1 +func (c *Client) Incr(name string, tags []string, rate float64) error { + return c.send(name, "1|c", tags, rate) +} + +// Set counts the number of unique elements in a group. +func (c *Client) Set(name string, value string, tags []string, rate float64) error { + stat := fmt.Sprintf("%s|s", value) + return c.send(name, stat, tags, rate) +} + +// Timing sends timing information, it is an alias for TimeInMilliseconds +func (c *Client) Timing(name string, value time.Duration, tags []string, rate float64) error { + return c.TimeInMilliseconds(name, value.Seconds()*1000, tags, rate) +} + +// TimeInMilliseconds sends timing information in milliseconds. +// It is flushed by statsd with percentiles, mean and other info (https://github.com/etsy/statsd/blob/master/docs/metric_types.md#timing) +func (c *Client) TimeInMilliseconds(name string, value float64, tags []string, rate float64) error { + stat := fmt.Sprintf("%f|ms", value) + return c.send(name, stat, tags, rate) +} + +// Event sends the provided Event. +func (c *Client) Event(e *Event) error { + if c == nil { + return nil + } + stat, err := e.Encode(c.Tags...) + if err != nil { + return err + } + return c.sendMsg(stat) +} + +// SimpleEvent sends an event with the provided title and text. +func (c *Client) SimpleEvent(title, text string) error { + e := NewEvent(title, text) + return c.Event(e) +} + +// ServiceCheck sends the provided ServiceCheck. +func (c *Client) ServiceCheck(sc *ServiceCheck) error { + stat, err := sc.Encode(c.Tags...) + if err != nil { + return err + } + return c.sendMsg(stat) +} + +// SimpleServiceCheck sends an serviceCheck with the provided name and status. +func (c *Client) SimpleServiceCheck(name string, status ServiceCheckStatus) error { + sc := NewServiceCheck(name, status) + return c.ServiceCheck(sc) +} + +// Close the client connection. +func (c *Client) Close() error { + if c == nil { + return nil + } + c.stop = true + return c.conn.Close() +} + +// Events support + +type eventAlertType string + +const ( + // Info is the "info" AlertType for events + Info eventAlertType = "info" + // Error is the "error" AlertType for events + Error eventAlertType = "error" + // Warning is the "warning" AlertType for events + Warning eventAlertType = "warning" + // Success is the "success" AlertType for events + Success eventAlertType = "success" +) + +type eventPriority string + +const ( + // Normal is the "normal" Priority for events + Normal eventPriority = "normal" + // Low is the "low" Priority for events + Low eventPriority = "low" +) + +// An Event is an object that can be posted to your DataDog event stream. +type Event struct { + // Title of the event. Required. 
+ Title string + // Text is the description of the event. Required. + Text string + // Timestamp is a timestamp for the event. If not provided, the dogstatsd + // server will set this to the current time. + Timestamp time.Time + // Hostname for the event. + Hostname string + // AggregationKey groups this event with others of the same key. + AggregationKey string + // Priority of the event. Can be statsd.Low or statsd.Normal. + Priority eventPriority + // SourceTypeName is a source type for the event. + SourceTypeName string + // AlertType can be statsd.Info, statsd.Error, statsd.Warning, or statsd.Success. + // If absent, the default value applied by the dogstatsd server is Info. + AlertType eventAlertType + // Tags for the event. + Tags []string +} + +// NewEvent creates a new event with the given title and text. Error checking +// against these values is done at send-time, or upon running e.Check. +func NewEvent(title, text string) *Event { + return &Event{ + Title: title, + Text: text, + } +} + +// Check verifies that an event is valid. +func (e Event) Check() error { + if len(e.Title) == 0 { + return fmt.Errorf("statsd.Event title is required") + } + if len(e.Text) == 0 { + return fmt.Errorf("statsd.Event text is required") + } + return nil +} + +// Encode returns the dogstatsd wire protocol representation for an event. +// Tags may be passed which will be added to the encoded output but not to +// the Event's list of tags, eg. for default tags. +func (e Event) Encode(tags ...string) (string, error) { + err := e.Check() + if err != nil { + return "", err + } + text := e.escapedText() + + var buffer bytes.Buffer + buffer.WriteString("_e{") + buffer.WriteString(strconv.FormatInt(int64(len(e.Title)), 10)) + buffer.WriteRune(',') + buffer.WriteString(strconv.FormatInt(int64(len(text)), 10)) + buffer.WriteString("}:") + buffer.WriteString(e.Title) + buffer.WriteRune('|') + buffer.WriteString(text) + + if !e.Timestamp.IsZero() { + buffer.WriteString("|d:") + buffer.WriteString(strconv.FormatInt(int64(e.Timestamp.Unix()), 10)) + } + + if len(e.Hostname) != 0 { + buffer.WriteString("|h:") + buffer.WriteString(e.Hostname) + } + + if len(e.AggregationKey) != 0 { + buffer.WriteString("|k:") + buffer.WriteString(e.AggregationKey) + + } + + if len(e.Priority) != 0 { + buffer.WriteString("|p:") + buffer.WriteString(string(e.Priority)) + } + + if len(e.SourceTypeName) != 0 { + buffer.WriteString("|s:") + buffer.WriteString(e.SourceTypeName) + } + + if len(e.AlertType) != 0 { + buffer.WriteString("|t:") + buffer.WriteString(string(e.AlertType)) + } + + writeTagString(&buffer, tags, e.Tags) + + return buffer.String(), nil +} + +// ServiceCheck support + +type ServiceCheckStatus byte + +const ( + // Ok is the "ok" ServiceCheck status + Ok ServiceCheckStatus = 0 + // Warn is the "warning" ServiceCheck status + Warn ServiceCheckStatus = 1 + // Critical is the "critical" ServiceCheck status + Critical ServiceCheckStatus = 2 + // Unknown is the "unknown" ServiceCheck status + Unknown ServiceCheckStatus = 3 +) + +// An ServiceCheck is an object that contains status of DataDog service check. +type ServiceCheck struct { + // Name of the service check. Required. + Name string + // Status of service check. Required. + Status ServiceCheckStatus + // Timestamp is a timestamp for the serviceCheck. If not provided, the dogstatsd + // server will set this to the current time. + Timestamp time.Time + // Hostname for the serviceCheck. + Hostname string + // A message describing the current state of the serviceCheck. 
+ Message string + // Tags for the serviceCheck. + Tags []string +} + +// NewServiceCheck creates a new serviceCheck with the given name and status. Error checking +// against these values is done at send-time, or upon running sc.Check. +func NewServiceCheck(name string, status ServiceCheckStatus) *ServiceCheck { + return &ServiceCheck{ + Name: name, + Status: status, + } +} + +// Check verifies that an event is valid. +func (sc ServiceCheck) Check() error { + if len(sc.Name) == 0 { + return fmt.Errorf("statsd.ServiceCheck name is required") + } + if byte(sc.Status) < 0 || byte(sc.Status) > 3 { + return fmt.Errorf("statsd.ServiceCheck status has invalid value") + } + return nil +} + +// Encode returns the dogstatsd wire protocol representation for an serviceCheck. +// Tags may be passed which will be added to the encoded output but not to +// the Event's list of tags, eg. for default tags. +func (sc ServiceCheck) Encode(tags ...string) (string, error) { + err := sc.Check() + if err != nil { + return "", err + } + message := sc.escapedMessage() + + var buffer bytes.Buffer + buffer.WriteString("_sc|") + buffer.WriteString(sc.Name) + buffer.WriteRune('|') + buffer.WriteString(strconv.FormatInt(int64(sc.Status), 10)) + + if !sc.Timestamp.IsZero() { + buffer.WriteString("|d:") + buffer.WriteString(strconv.FormatInt(int64(sc.Timestamp.Unix()), 10)) + } + + if len(sc.Hostname) != 0 { + buffer.WriteString("|h:") + buffer.WriteString(sc.Hostname) + } + + writeTagString(&buffer, tags, sc.Tags) + + if len(message) != 0 { + buffer.WriteString("|m:") + buffer.WriteString(message) + } + + return buffer.String(), nil +} + +func (e Event) escapedText() string { + return strings.Replace(e.Text, "\n", "\\n", -1) +} + +func (sc ServiceCheck) escapedMessage() string { + msg := strings.Replace(sc.Message, "\n", "\\n", -1) + return strings.Replace(msg, "m:", `m\:`, -1) +} + +func removeNewlines(str string) string { + return strings.Replace(str, "\n", "", -1) +} + +func writeTagString(w io.Writer, tagList1, tagList2 []string) { + // the tag lists may be shared with other callers, so we cannot modify + // them in any way (which means we cannot append to them either) + // therefore we must make an entirely separate copy just for this call + totalLen := len(tagList1) + len(tagList2) + if totalLen == 0 { + return + } + tags := make([]string, 0, totalLen) + tags = append(tags, tagList1...) + tags = append(tags, tagList2...) + + io.WriteString(w, "|#") + io.WriteString(w, removeNewlines(tags[0])) + for _, tag := range tags[1:] { + io.WriteString(w, ",") + io.WriteString(w, removeNewlines(tag)) + } +} diff --git a/vendor/github.com/DataDog/datadog-go/statsd/statsd_test.go b/vendor/github.com/DataDog/datadog-go/statsd/statsd_test.go new file mode 100644 index 0000000000..3344015831 --- /dev/null +++ b/vendor/github.com/DataDog/datadog-go/statsd/statsd_test.go @@ -0,0 +1,620 @@ +// Copyright 2013 Ooyala, Inc. 
+ +package statsd + +import ( + "fmt" + "io" + "net" + "reflect" + "strings" + "testing" + "time" +) + +var dogstatsdTests = []struct { + GlobalNamespace string + GlobalTags []string + Method string + Metric string + Value interface{} + Tags []string + Rate float64 + Expected string +}{ + {"", nil, "Gauge", "test.gauge", 1.0, nil, 1.0, "test.gauge:1.000000|g"}, + {"", nil, "Gauge", "test.gauge", 1.0, nil, 0.999999, "test.gauge:1.000000|g|@0.999999"}, + {"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA"}, 1.0, "test.gauge:1.000000|g|#tagA"}, + {"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA", "tagB"}, 1.0, "test.gauge:1.000000|g|#tagA,tagB"}, + {"", nil, "Gauge", "test.gauge", 1.0, []string{"tagA"}, 0.999999, "test.gauge:1.000000|g|@0.999999|#tagA"}, + {"", nil, "Count", "test.count", int64(1), []string{"tagA"}, 1.0, "test.count:1|c|#tagA"}, + {"", nil, "Count", "test.count", int64(-1), []string{"tagA"}, 1.0, "test.count:-1|c|#tagA"}, + {"", nil, "Histogram", "test.histogram", 2.3, []string{"tagA"}, 1.0, "test.histogram:2.300000|h|#tagA"}, + {"", nil, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "test.set:uuid|s|#tagA"}, + {"flubber.", nil, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "flubber.test.set:uuid|s|#tagA"}, + {"", []string{"tagC"}, "Set", "test.set", "uuid", []string{"tagA"}, 1.0, "test.set:uuid|s|#tagC,tagA"}, + {"", nil, "Count", "test.count", int64(1), []string{"hello\nworld"}, 1.0, "test.count:1|c|#helloworld"}, +} + +func assertNotPanics(t *testing.T, f func()) { + defer func() { + if r := recover(); r != nil { + t.Fatal(r) + } + }() + f() +} + +func TestClient(t *testing.T) { + addr := "localhost:1201" + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + t.Fatal(err) + } + + server, err := net.ListenUDP("udp", udpAddr) + if err != nil { + t.Fatal(err) + } + defer server.Close() + + client, err := New(addr) + if err != nil { + t.Fatal(err) + } + + for _, tt := range dogstatsdTests { + client.Namespace = tt.GlobalNamespace + client.Tags = tt.GlobalTags + method := reflect.ValueOf(client).MethodByName(tt.Method) + e := method.Call([]reflect.Value{ + reflect.ValueOf(tt.Metric), + reflect.ValueOf(tt.Value), + reflect.ValueOf(tt.Tags), + reflect.ValueOf(tt.Rate)})[0] + errInter := e.Interface() + if errInter != nil { + t.Fatal(errInter.(error)) + } + + bytes := make([]byte, 1024) + n, err := server.Read(bytes) + if err != nil { + t.Fatal(err) + } + message := bytes[:n] + if string(message) != tt.Expected { + t.Errorf("Expected: %s. Actual: %s", tt.Expected, string(message)) + } + } +} + +func TestBufferedClient(t *testing.T) { + addr := "localhost:1201" + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + t.Fatal(err) + } + + server, err := net.ListenUDP("udp", udpAddr) + if err != nil { + t.Fatal(err) + } + defer server.Close() + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + t.Fatal(err) + } + + bufferLength := 8 + client := &Client{ + conn: conn, + commands: make([]string, 0, bufferLength), + bufferLength: bufferLength, + } + + client.Namespace = "foo." 
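+ // Every metric flushed below should come out prefixed with the
+ // namespace and suffixed with the global tag, e.g. "foo.ic:1|c|#dd:2".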
+ client.Tags = []string{"dd:2"} + + dur, _ := time.ParseDuration("123us") + + client.Incr("ic", nil, 1) + client.Decr("dc", nil, 1) + client.Count("cc", 1, nil, 1) + client.Gauge("gg", 10, nil, 1) + client.Histogram("hh", 1, nil, 1) + client.Timing("tt", dur, nil, 1) + client.Set("ss", "ss", nil, 1) + + if len(client.commands) != 7 { + t.Errorf("Expected client to have buffered 7 commands, but found %d\n", len(client.commands)) + } + + client.Set("ss", "xx", nil, 1) + err = client.flush() + if err != nil { + t.Errorf("Error sending: %s", err) + } + + if len(client.commands) != 0 { + t.Errorf("Expecting send to flush commands, but found %d\n", len(client.commands)) + } + + buffer := make([]byte, 4096) + n, err := io.ReadAtLeast(server, buffer, 1) + result := string(buffer[:n]) + + if err != nil { + t.Error(err) + } + + expected := []string{ + `foo.ic:1|c|#dd:2`, + `foo.dc:-1|c|#dd:2`, + `foo.cc:1|c|#dd:2`, + `foo.gg:10.000000|g|#dd:2`, + `foo.hh:1.000000|h|#dd:2`, + `foo.tt:0.123000|ms|#dd:2`, + `foo.ss:ss|s|#dd:2`, + `foo.ss:xx|s|#dd:2`, + } + + for i, res := range strings.Split(result, "\n") { + if res != expected[i] { + t.Errorf("Got `%s`, expected `%s`", res, expected[i]) + } + } + + client.Event(&Event{Title: "title1", Text: "text1", Priority: Normal, AlertType: Success, Tags: []string{"tagg"}}) + client.SimpleEvent("event1", "text1") + + if len(client.commands) != 2 { + t.Errorf("Expected to find %d commands, but found %d\n", 2, len(client.commands)) + } + + err = client.flush() + + if err != nil { + t.Errorf("Error sending: %s", err) + } + + if len(client.commands) != 0 { + t.Errorf("Expecting send to flush commands, but found %d\n", len(client.commands)) + } + + buffer = make([]byte, 1024) + n, err = io.ReadAtLeast(server, buffer, 1) + result = string(buffer[:n]) + + if err != nil { + t.Error(err) + } + + if n == 0 { + t.Errorf("Read 0 bytes but expected more.") + } + + expected = []string{ + `_e{6,5}:title1|text1|p:normal|t:success|#dd:2,tagg`, + `_e{6,5}:event1|text1|#dd:2`, + } + + for i, res := range strings.Split(result, "\n") { + if res != expected[i] { + t.Errorf("Got `%s`, expected `%s`", res, expected[i]) + } + } + +} + +func TestJoinMaxSize(t *testing.T) { + c := Client{} + elements := []string{"abc", "abcd", "ab", "xyz", "foobaz", "x", "wwxxyyzz"} + res, n := c.joinMaxSize(elements, " ", 8) + + if len(res) != len(n) && len(res) != 4 { + t.Errorf("Was expecting 4 frames to flush but got: %v - %v", n, res) + } + if n[0] != 2 { + t.Errorf("Was expecting 2 elements in first frame but got: %v", n[0]) + } + if string(res[0]) != "abc abcd" { + t.Errorf("Join should have returned \"abc abcd\" in frame, but found: %s", res[0]) + } + if n[1] != 2 { + t.Errorf("Was expecting 2 elements in second frame but got: %v - %v", n[1], n) + } + if string(res[1]) != "ab xyz" { + t.Errorf("Join should have returned \"ab xyz\" in frame, but found: %s", res[1]) + } + if n[2] != 2 { + t.Errorf("Was expecting 2 elements in third frame but got: %v - %v", n[2], n) + } + if string(res[2]) != "foobaz x" { + t.Errorf("Join should have returned \"foobaz x\" in frame, but found: %s", res[2]) + } + if n[3] != 1 { + t.Errorf("Was expecting 1 element in fourth frame but got: %v - %v", n[3], n) + } + if string(res[3]) != "wwxxyyzz" { + t.Errorf("Join should have returned \"wwxxyyzz\" in frame, but found: %s", res[3]) + } + + res, n = c.joinMaxSize(elements, " ", 11) + + if len(res) != len(n) && len(res) != 3 { + t.Errorf("Was expecting 3 frames to flush but got: %v - %v", n, res) + } + if n[0] != 3 { + 
t.Errorf("Was expecting 3 elements in first frame but got: %v", n[0]) + } + if string(res[0]) != "abc abcd ab" { + t.Errorf("Join should have returned \"abc abcd ab\" in frame, but got: %s", res[0]) + } + if n[1] != 2 { + t.Errorf("Was expecting 2 elements in second frame but got: %v", n[1]) + } + if string(res[1]) != "xyz foobaz" { + t.Errorf("Join should have returned \"xyz foobaz\" in frame, but got: %s", res[1]) + } + if n[2] != 2 { + t.Errorf("Was expecting 2 elements in third frame but got: %v", n[2]) + } + if string(res[2]) != "x wwxxyyzz" { + t.Errorf("Join should have returned \"x wwxxyyzz\" in frame, but got: %s", res[2]) + } + + res, n = c.joinMaxSize(elements, " ", 8) + + if len(res) != len(n) && len(res) != 7 { + t.Errorf("Was expecting 7 frames to flush but got: %v - %v", n, res) + } + if n[0] != 1 { + t.Errorf("Separator is long, expected a single element in frame but got: %d - %v", n[0], res) + } + if string(res[0]) != "abc" { + t.Errorf("Join should have returned \"abc\" in first frame, but got: %s", res) + } + if n[1] != 1 { + t.Errorf("Separator is long, expected a single element in frame but got: %d - %v", n[1], res) + } + if string(res[1]) != "abcd" { + t.Errorf("Join should have returned \"abcd\" in second frame, but got: %s", res[1]) + } + if n[2] != 1 { + t.Errorf("Separator is long, expected a single element in third frame but got: %d - %v", n[2], res) + } + if string(res[2]) != "ab" { + t.Errorf("Join should have returned \"ab\" in third frame, but got: %s", res[2]) + } + if n[3] != 1 { + t.Errorf("Separator is long, expected a single element in fourth frame but got: %d - %v", n[3], res) + } + if string(res[3]) != "xyz" { + t.Errorf("Join should have returned \"xyz\" in fourth frame, but got: %s", res[3]) + } + if n[4] != 1 { + t.Errorf("Separator is long, expected a single element in fifth frame but got: %d - %v", n[4], res) + } + if string(res[4]) != "foobaz" { + t.Errorf("Join should have returned \"foobaz\" in fifth frame, but got: %s", res[4]) + } + if n[5] != 1 { + t.Errorf("Separator is long, expected a single element in sixth frame but got: %d - %v", n[5], res) + } + if string(res[5]) != "x" { + t.Errorf("Join should have returned \"x\" in sixth frame, but got: %s", res[5]) + } + if n[6] != 1 { + t.Errorf("Separator is long, expected a single element in seventh frame but got: %d - %v", n[6], res) + } + if string(res[6]) != "wwxxyyzz" { + t.Errorf("Join should have returned \"wwxxyyzz\" in seventh frame, but got: %s", res[6]) + } + + res, n = c.joinMaxSize(elements[4:], " ", 6) + if len(res) != len(n) && len(res) != 3 { + t.Errorf("Was expecting 3 frames to flush but got: %v - %v", n, res) + + } + if n[0] != 1 { + t.Errorf("Element should just fit in frame - expected single element in frame: %d - %v", n[0], res) + } + if string(res[0]) != "foobaz" { + t.Errorf("Join should have returned \"foobaz\" in first frame, but got: %s", res[0]) + } + if n[1] != 1 { + t.Errorf("Single element expected in frame, but got. %d - %v", n[1], res) + } + if string(res[1]) != "x" { + t.Errorf("Join should' have returned \"x\" in second frame, but got: %s", res[1]) + } + if n[2] != 1 { + t.Errorf("Even though element is greater then max size we still try to send it. 
%d - %v", n[2], res) + } + if string(res[2]) != "wwxxyyzz" { + t.Errorf("Join should have returned \"wwxxyyzz\" in third frame, but got: %s", res[2]) + } +} + +func TestSendMsg(t *testing.T) { + addr := "localhost:1201" + udpAddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + t.Fatal(err) + } + + server, err := net.ListenUDP("udp", udpAddr) + if err != nil { + t.Fatal(err) + } + defer server.Close() + + conn, err := net.DialUDP("udp", nil, udpAddr) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + client := &Client{ + conn: conn, + bufferLength: 0, + } + + err = client.sendMsg(strings.Repeat("x", MaxUDPPayloadSize+1)) + if err == nil { + t.Error("Expected error to be returned if message size is bigger than MaxUDPPayloadSize") + } + + longMsg := strings.Repeat("x", MaxUDPPayloadSize) + + err = client.sendMsg(longMsg) + if err != nil { + t.Errorf("Expected no error to be returned if message size is smaller or equal to MaxUDPPayloadSize, got: %s", err.Error()) + } + + buffer := make([]byte, MaxUDPPayloadSize+1) + n, err := io.ReadAtLeast(server, buffer, 1) + + if err != nil { + t.Fatalf("Expected no error to be returned reading the buffer, got: %s", err.Error()) + } + + if n != MaxUDPPayloadSize { + t.Fatalf("Failed to read full message from buffer. Got size `%d` expected `%d`", n, MaxUDPPayloadSize) + } + + if string(buffer[:n]) != longMsg { + t.Fatalf("The received message did not match what we expect.") + } + + client = &Client{ + conn: conn, + commands: make([]string, 0, 1), + bufferLength: 1, + } + + err = client.sendMsg(strings.Repeat("x", MaxUDPPayloadSize+1)) + if err == nil { + t.Error("Expected error to be returned if message size is bigger than MaxUDPPayloadSize") + } + + err = client.sendMsg(longMsg) + if err != nil { + t.Errorf("Expected no error to be returned if message size is smaller or equal to MaxUDPPayloadSize, got: %s", err.Error()) + } + + client.Lock() + err = client.flush() + client.Unlock() + + if err != nil { + t.Fatalf("Expected no error to be returned flushing the client, got: %s", err.Error()) + } + + buffer = make([]byte, MaxUDPPayloadSize+1) + n, err = io.ReadAtLeast(server, buffer, 1) + + if err != nil { + t.Fatalf("Expected no error to be returned reading the buffer, got: %s", err.Error()) + } + + if n != MaxUDPPayloadSize { + t.Fatalf("Failed to read full message from buffer. 
Got size `%d` expected `%d`", n, MaxUDPPayloadSize) + } + + if string(buffer[:n]) != longMsg { + t.Fatalf("The received message did not match what we expect.") + } +} + +func TestNilSafe(t *testing.T) { + var c *Client + assertNotPanics(t, func() { c.Close() }) + assertNotPanics(t, func() { c.Count("", 0, nil, 1) }) + assertNotPanics(t, func() { c.Histogram("", 0, nil, 1) }) + assertNotPanics(t, func() { c.Gauge("", 0, nil, 1) }) + assertNotPanics(t, func() { c.Set("", "", nil, 1) }) + assertNotPanics(t, func() { c.send("", "", nil, 1) }) + assertNotPanics(t, func() { c.SimpleEvent("", "") }) +} + +func TestEvents(t *testing.T) { + matrix := []struct { + event *Event + encoded string + }{ + { + NewEvent("Hello", "Something happened to my event"), + `_e{5,30}:Hello|Something happened to my event`, + }, { + &Event{Title: "hi", Text: "okay", AggregationKey: "foo"}, + `_e{2,4}:hi|okay|k:foo`, + }, { + &Event{Title: "hi", Text: "okay", AggregationKey: "foo", AlertType: Info}, + `_e{2,4}:hi|okay|k:foo|t:info`, + }, { + &Event{Title: "hi", Text: "w/e", AlertType: Error, Priority: Normal}, + `_e{2,3}:hi|w/e|p:normal|t:error`, + }, { + &Event{Title: "hi", Text: "uh", Tags: []string{"host:foo", "app:bar"}}, + `_e{2,2}:hi|uh|#host:foo,app:bar`, + }, { + &Event{Title: "hi", Text: "line1\nline2", Tags: []string{"hello\nworld"}}, + `_e{2,12}:hi|line1\nline2|#helloworld`, + }, + } + + for _, m := range matrix { + r, err := m.event.Encode() + if err != nil { + t.Errorf("Error encoding: %s\n", err) + continue + } + if r != m.encoded { + t.Errorf("Expected `%s`, got `%s`\n", m.encoded, r) + } + } + + e := NewEvent("", "hi") + if _, err := e.Encode(); err == nil { + t.Errorf("Expected error on empty Title.") + } + + e = NewEvent("hi", "") + if _, err := e.Encode(); err == nil { + t.Errorf("Expected error on empty Text.") + } + + e = NewEvent("hello", "world") + s, err := e.Encode("tag1", "tag2") + if err != nil { + t.Error(err) + } + expected := "_e{5,5}:hello|world|#tag1,tag2" + if s != expected { + t.Errorf("Expected %s, got %s", expected, s) + } + if len(e.Tags) != 0 { + t.Errorf("Modified event in place illegally.") + } +} + +func TestServiceChecks(t *testing.T) { + matrix := []struct { + serviceCheck *ServiceCheck + encoded string + }{ + { + NewServiceCheck("DataCatService", Ok), + `_sc|DataCatService|0`, + }, { + NewServiceCheck("DataCatService", Warn), + `_sc|DataCatService|1`, + }, { + NewServiceCheck("DataCatService", Critical), + `_sc|DataCatService|2`, + }, { + NewServiceCheck("DataCatService", Unknown), + `_sc|DataCatService|3`, + }, { + &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat"}, + `_sc|DataCatService|0|h:DataStation.Cat`, + }, { + &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message"}, + `_sc|DataCatService|0|h:DataStation.Cat|m:Here goes valuable message`, + }, { + &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш"}, + `_sc|DataCatService|0|h:DataStation.Cat|m:Here are some cyrillic chars: к л м н о п р с т у ф х ц ч ш`, + }, { + &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes valuable message", Tags: []string{"host:foo", "app:bar"}}, + `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes valuable message`, + }, { + &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes \n that should be 
escaped", Tags: []string{"host:foo", "app:b\nar"}}, + `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes \n that should be escaped`, + }, { + &ServiceCheck{Name: "DataCatService", Status: Ok, Hostname: "DataStation.Cat", Message: "Here goes m: that should be escaped", Tags: []string{"host:foo", "app:bar"}}, + `_sc|DataCatService|0|h:DataStation.Cat|#host:foo,app:bar|m:Here goes m\: that should be escaped`, + }, + } + + for _, m := range matrix { + r, err := m.serviceCheck.Encode() + if err != nil { + t.Errorf("Error encoding: %s\n", err) + continue + } + if r != m.encoded { + t.Errorf("Expected `%s`, got `%s`\n", m.encoded, r) + } + } + + sc := NewServiceCheck("", Ok) + if _, err := sc.Encode(); err == nil { + t.Errorf("Expected error on empty Name.") + } + + sc = NewServiceCheck("sc", ServiceCheckStatus(5)) + if _, err := sc.Encode(); err == nil { + t.Errorf("Expected error on invalid status value.") + } + + sc = NewServiceCheck("hello", Warn) + s, err := sc.Encode("tag1", "tag2") + if err != nil { + t.Error(err) + } + expected := "_sc|hello|1|#tag1,tag2" + if s != expected { + t.Errorf("Expected %s, got %s", expected, s) + } + if len(sc.Tags) != 0 { + t.Errorf("Modified serviceCheck in place illegally.") + } +} + +// These benchmarks show that using a buffer instead of sprintf-ing together +// a bunch of intermediate strings is 4-5x faster + +func BenchmarkFormatNew(b *testing.B) { + b.StopTimer() + c := &Client{} + c.Namespace = "foo.bar." + c.Tags = []string{"app:foo", "host:bar"} + b.StartTimer() + for i := 0; i < b.N; i++ { + c.format("system.cpu.idle", "10", []string{"foo"}, 1) + c.format("system.cpu.load", "0.1", nil, 0.9) + } +} + +// Old formatting function, added to client for tests +func (c *Client) formatOld(name, value string, tags []string, rate float64) string { + if rate < 1 { + value = fmt.Sprintf("%s|@%f", value, rate) + } + if c.Namespace != "" { + name = fmt.Sprintf("%s%s", c.Namespace, name) + } + + tags = append(c.Tags, tags...) + if len(tags) > 0 { + value = fmt.Sprintf("%s|#%s", value, strings.Join(tags, ",")) + } + + return fmt.Sprintf("%s:%s", name, value) + +} + +func BenchmarkFormatOld(b *testing.B) { + b.StopTimer() + c := &Client{} + c.Namespace = "foo.bar." + c.Tags = []string{"app:foo", "host:bar"} + b.StartTimer() + for i := 0; i < b.N; i++ { + c.formatOld("system.cpu.idle", "10", []string{"foo"}, 1) + c.formatOld("system.cpu.load", "0.1", nil, 0.9) + } +} diff --git a/vendor/github.com/Dieterbe/artisanalhistogram/README.md b/vendor/github.com/Dieterbe/artisanalhistogram/README.md new file mode 100644 index 0000000000..e313831c87 --- /dev/null +++ b/vendor/github.com/Dieterbe/artisanalhistogram/README.md @@ -0,0 +1,102 @@ +## Artisanal histogram + + +Hand crafted histograms, made with love. To power insights from networked applications. Not general-purpose. +Also somewhat experimental. + + +### goals + +* optimize for typical range of networked applications, where we care for durations between roughly 1ms and 15s. + anything under a ms is plenty fast. Even if it was a a microsecond or less, we don't mind it being reported in the 1ms bucket. + Likewise, anything over 15s is awful. Whether it's 15s, 20s or 30s. Doesn't really matter. They are all terrible and can go in the same bucket. + Contrast this to [hdrhistograms](https://github.com/codahale/hdrhistogram) which are designed to provide buckets which can provide close approximations over huge ranges which I don't actually care about. 
+ This way we can also store the data in a more compact fashion.
+* understandability of the class intervals ("buckets"), e.g. have rounded intervals that show well on UI's.
+ powers of two are [faster to compute](http://pvk.ca/Blog/2015/06/27/linear-log-bucketing-fast-versatile-simple/) but then your buckets are like 1024, 1280, etc.
+ I want to be able to answer questions like "how many requests were completed within 5 milliseconds? how many in a second or less?"
+ Every histogram can return percentiles with a given degree of error, often configurable.
+ We allow for a bit more error in the typical case (and much more error for extreme outliers such as <<1ms and >>15s) in return for accurate numbers in histograms the way people actually want to look at them.
+* consistent bucket sizes across different histograms so we can easily aggregate different histograms together (e.g. for timeseries rollups or runtime consolidation).
+(this rules out [gohistogram](https://github.com/VividCortex/gohistogram))
+* give equal weight to all samples within a given observation interval, and no weight to samples from prior intervals (contrast to EWMA based approaches)
+* good enough performance to not be an overhead for applications doing millions of histogram adds per second. See below
+
+### performance
+
+Performance is not ridiculously fast like [some of the histograms](https://github.com/dgryski/go-linlog) that only need a few instructions per Add because their buckets have boundaries optimized for powers of two. We have "human friendly" buckets, so our adds are up to about 20ns (e.g. 1M/second at 5% cpu usage), which is fast enough for now but could be improved later.
+Getting a report takes about 700ns.
+
+On my i7-4810MQ CPU @ 2.80GHz:
+
+```
+Benchmark_AddDurationBest-8 100000000 12.7 ns/op
+Benchmark_AddDurationWorst-8 100000000 12.6 ns/op
+Benchmark_AddDurationEvenDistribution-8 100000000 19.5 ns/op
+Benchmark_AddDurationUpto1s-8 100000000 20.8 ns/op
+Benchmark_Report1kvals-8 3000000 566 ns/op
+PASS
+ok github.com/Dieterbe/artisanalhistogram/hist1 117.842s
+```
+
+### warning
+
+if it wasn't clear yet, you need to understand the implications of this approach. For data <1ms or >15s the reported data will be significantly different from the actual results; however, the conclusions
+will be the same (in the first case "everything is great" and in the latter "our system is doing terribly"). statistical summaries such as means, which are already misleading on their own, can get even more misleading, so pay attention to what the histogram buckets say. Those are the source of truth.
+
+
+### buckets
+
+the following classes (buckets) have been manually crafted in an effort to
+* cover the space well
+* represent boundaries people actually care about
+* still minimize errors as well as possible, by scaling up the class intervals corresponding to the bucket boundaries.
+
+boundaries are in ms.
+exactly 32 buckets. (32x4=128B size)
+
+```
+1
+2
+3
+5
+7.5
+10
+15
+20
+30
+40
+50
+65
+80
+100
+150
+200
+300
+400
+500
+650
+800
+1000
+1500
+2000
+3000
+4000
+5000
+6500
+8000
+10000
+15000
+inf
+```
+
+### implementation notes
+
+* 32 buckets because that should fit nicely on graphical UI's
+ also a round size of 128B
+* math.MaxUint32, i.e. 4294967295 or "4 billion", should be a reasonable count limit, to save space.
+ it's up to the operator to keep tabs on the bucket counts and judge whether the data is to be trusted or not.
+ if you get anywhere near this volume it's time to look into something better or shorten your reporting interval.
+ unfortunately, trying to automatically report near-overflows would be too expensive.
+ Note that for the total count (across all buckets) we do report validity, because that's easy to establish when reporting.
+ but validity of individual buckets is up to the user.
+* because we use uint32 to track microseconds, inserting durations higher than 4294s (71 minutes) will also overflow and possibly fall into the wrong buckets
diff --git a/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s.go b/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s.go
index 4431a97ad1..039a21a350 100644
--- a/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s.go
+++ b/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s.go
@@ -8,7 +8,7 @@ import (
 
 const maxVal = uint32(29999999) // used to report max number as 29s even if it's higher
 
-// Hist15s is optimized for measurements between 500ms and 12h
+// Hist15s is optimized for measurements between 1ms and 15s
 type Hist15s struct {
 	limits [32]uint32 // in micros
 	counts [32]uint32
diff --git a/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s_test.go b/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s_test.go
new file mode 100644
index 0000000000..443c24b24b
--- /dev/null
+++ b/vendor/github.com/Dieterbe/artisanalhistogram/hist15s/hist15s_test.go
@@ -0,0 +1,212 @@
+package hist15s
+
+import (
+ "math/rand"
+ "testing"
+ "time"
+)
+
+func Test_SearchBucket(t *testing.T) {
+ hist := New() // we just want access to the buckets.
+ cases := []struct {
+ val uint32
+ bucket int
+ }{
+ {0, 0},
+ {1, 0},
+ {999, 0},
+ {1000, 0},
+ {50000, 10},
+ {50001, 11},
+ {64449, 11},
+ {65000, 11},
+ {65001, 12},
+ {15000000, 30},
+ {15000001, 31},
+ {25000000, 31},
+ {99000000, 31},
+ {4293000000, 31}, // anything higher than this is undefined
+ }
+ for _, cas := range cases {
+ bucket := searchBucket(hist.limits, cas.val)
+ if bucket != cas.bucket {
+ t.Fatalf("expected %d to be in bucket %d, got bucket %d", cas.val, cas.bucket, bucket)
+ }
+ }
+}
+
+func Test_Report(t *testing.T) {
+ hist := New()
+ hist.AddDuration(time.Duration(10) * time.Microsecond)
+ hist.AddDuration(time.Duration(4) * time.Millisecond)
+ hist.AddDuration(time.Duration(5) * time.Millisecond)
+ hist.AddDuration(time.Duration(10) * time.Millisecond)
+ hist.AddDuration(time.Duration(1000) * time.Millisecond)
+ hist.AddDuration(time.Duration(1000) * time.Millisecond)
+ hist.AddDuration(time.Duration(1000) * time.Millisecond)
+ hist.AddDuration(time.Duration(1001) * time.Millisecond)
+ hist.AddDuration(time.Duration(1200) * time.Millisecond)
+ hist.AddDuration(time.Duration(21) * time.Second)
+
+ snap := hist.Snapshot()
+ exp := []uint32{
+ 1, //1000 micros,
+ 0, //2000,
+ 0, //3000,
+ 2, //5000,
+ 0, //7500,
+ 1, //10000,
+ 0, //15000,
+ 0, //20000,
+ 0, //30000,
+ 0, //40000,
+ 0, //50000,
+ 0, //65000,
+ 0, //80000,
+ 0, //100000,
+ 0, //150000,
+ 0, //200000,
+ 0, //300000,
+ 0, //400000,
+ 0, //500000,
+ 0, //650000,
+ 0, //800000,
+ 3, //1000000,
+ 2, //1500000,
+ 0, //2000000,
+ 0, //3000000,
+ 0, //4000000,
+ 0, //5000000,
+ 0, //6500000,
+ 0, //8000000,
+ 0, //10000000,
+ 0, //15000000,
+ 1, //29999999, // used to represent inf
+ }
+ for i, cnt := range snap {
+ if cnt != exp[i] {
+ t.Fatalf("expected snap[%d] = %d, got %d", i, exp[i], cnt)
+ }
+ }
+
+ r, ok := hist.Report(snap)
+ if !ok {
+ t.Fatalf("expected the report to be
valid") + } + + actualTotal := uint32(10 + 4000 + 5000 + 10000 + 3000*1000 + 2002*1000 + 1200000 + 21000000) + actualMean := actualTotal / 10 + expTotal := uint32(1000 + 2*5000 + 10000 + 3*1000000 + 2*1500000 + 29999999) + + if r.Min != 1000 { + t.Fatalf("expected min %d, got %d", 1000, r.Min) + } + + expMean := expTotal / 10 + if r.Mean != expMean { + t.Fatalf("expected mean %d, got %d (actual mean %d)", expMean, r.Mean, actualMean) + } + t.Logf("actual mean %d, our mean %d (big outlier!)", actualMean, r.Mean) + + if r.Median != 1000000 { + t.Fatalf("expected med %d, got %d", 1000000, r.Median) + } + + if r.P75 != 1500000 { + t.Fatalf("expected p75 %d, got %d", 1500000, r.P75) + } + + if r.P90 != 1500000 { + t.Fatalf("expected p90 %d, got %d", 1500000, r.P90) + } + + expMax := uint32(29999999) + if r.Max != expMax { + t.Fatalf("expected max %d, got %d", expMax, r.Max) + } + + if r.Count != 10 { + t.Fatalf("expected count %d, got %d (actual count %d)", 10, r.Count) + } + +} + +// all values under 1ms so they go into first bucket +func Benchmark_AddDurationBest(b *testing.B) { + data := make([]time.Duration, b.N) + hist := New() + for i := 0; i < b.N; i++ { + data[i] = time.Duration(rand.Intn(1000)) * time.Microsecond + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + hist.AddDuration(data[i]) + } +} + +// all values over 15s so they go into last bucket +func Benchmark_AddDurationWorst(b *testing.B) { + data := make([]time.Duration, b.N) + hist := New() + for i := 0; i < b.N; i++ { + data[i] = time.Duration(16+rand.Intn(10)) * time.Second + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + hist.AddDuration(data[i]) + } +} + +// all between 0ms and 20s to they go anywhere. but later buckets get higher proportion cause they cover more ground +func Benchmark_AddDurationEvenDistribution(b *testing.B) { + data := make([]time.Duration, b.N) + hist := New() + for i := 0; i < b.N; i++ { + data[i] = time.Duration(rand.Intn(20000000)) * time.Microsecond + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + hist.AddDuration(data[i]) + } +} + +// all between 0ms and 1s. more realistic. 
control over distribution would be better though
+func Benchmark_AddDurationUpto1s(b *testing.B) {
+ data := make([]time.Duration, b.N)
+ hist := New()
+ for i := 0; i < b.N; i++ {
+ data[i] = time.Duration(rand.Intn(1000)) * time.Millisecond
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ hist.AddDuration(data[i])
+ }
+}
+
+var _report Report
+
+func Benchmark_Report1kvals(b *testing.B) {
+ data := make([]time.Duration, 1000)
+ hist := New()
+ for i := 0; i < 1000; i++ {
+ data[i] = time.Duration(rand.Intn(1000)) * time.Millisecond
+ }
+ var r Report
+
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+
+ // Snapshot resets the state, so we have to repopulate it
+ b.StopTimer()
+ for i := 0; i < 1000; i++ {
+ hist.AddDuration(data[i])
+ }
+ b.StartTimer()
+
+ snap := hist.Snapshot()
+ r, _ = hist.Report(snap)
+ }
+ _report = r
+
+}
diff --git a/vendor/github.com/Dieterbe/profiletrigger/README.md b/vendor/github.com/Dieterbe/profiletrigger/README.md
new file mode 100644
index 0000000000..426314da7a
--- /dev/null
+++ b/vendor/github.com/Dieterbe/profiletrigger/README.md
@@ -0,0 +1,13 @@
+[![Go Report Card](https://goreportcard.com/badge/github.com/Dieterbe/profiletrigger)](https://goreportcard.com/report/github.com/Dieterbe/profiletrigger)
+[![GoDoc](https://godoc.org/github.com/Dieterbe/profiletrigger?status.svg)](https://godoc.org/github.com/Dieterbe/profiletrigger)
+
+automatically trigger a profile in your golang (go) application when a condition is matched.
+
+# currently implemented:
+
+* when the process obtains a certain number of bytes from the system, save a heap (memory) profile
+* when cpu usage reaches a certain percentage, save a cpu profile.
+
+# demo
+
+see the included cpudemo and heapdemo programs, which gradually add more and more cpu and heap utilisation, to show the profiletrigger kicking in.
diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore
new file mode 100644
index 0000000000..b883f1fdc6
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/.gitignore
@@ -0,0 +1 @@
+*.exe
diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE
new file mode 100644
index 0000000000..b8b569d774
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Microsoft
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+ diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 0000000000..5680010575 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,22 @@ +# go-winio + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. + +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 0000000000..2be34af431 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. +func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. 
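+//
+// A typical consumer loop (an illustrative sketch, mirroring backup_test.go):
+//
+//	br := NewBackupStreamReader(r)
+//	for {
+//		hdr, err := br.Next()
+//		if err == io.EOF {
+//			break // no more streams
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// hdr.Id identifies the stream; its payload is read via br.Read.
+//	}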
+func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. 
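+//
+// Close must be called to release the Win32 backup context; per Close below,
+// this does not close the underlying file.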
+func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). 
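+//
+// Illustrative call (a sketch; acquiring the backup/restore privileges is the
+// caller's responsibility):
+//
+//	f, err := OpenForBackup(`C:\some\path`, syscall.GENERIC_READ,
+//		syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)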
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/backup_test.go b/vendor/github.com/Microsoft/go-winio/backup_test.go new file mode 100644 index 0000000000..cc5a0c5ff0 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup_test.go @@ -0,0 +1,255 @@ +package winio + +import ( + "io" + "io/ioutil" + "os" + "syscall" + "testing" +) + +var testFileName string + +func TestMain(m *testing.M) { + f, err := ioutil.TempFile("", "tmp") + if err != nil { + panic(err) + } + testFileName = f.Name() + f.Close() + defer os.Remove(testFileName) + os.Exit(m.Run()) +} + +func makeTestFile(makeADS bool) error { + os.Remove(testFileName) + f, err := os.Create(testFileName) + if err != nil { + return err + } + defer f.Close() + _, err = f.Write([]byte("testing 1 2 3\n")) + if err != nil { + return err + } + if makeADS { + a, err := os.Create(testFileName + ":ads.txt") + if err != nil { + return err + } + defer a.Close() + _, err = a.Write([]byte("alternate data stream\n")) + if err != nil { + return err + } + } + return nil +} + +func TestBackupRead(t *testing.T) { + err := makeTestFile(true) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if len(b) == 0 { + t.Fatal("no data") + } +} + +func TestBackupStreamRead(t *testing.T) { + err := makeTestFile(true) + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + + br := NewBackupStreamReader(r) + gotData := false + gotAltData := false + for { + hdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + switch hdr.Id { + case BackupData: + if gotData { + t.Fatal("duplicate data") + } + if hdr.Name != "" { + t.Fatalf("unexpected name %s", hdr.Name) + } + b, err := ioutil.ReadAll(br) + if err != nil { + t.Fatal(err) + } + if string(b) != "testing 1 2 3\n" { + t.Fatalf("incorrect data %v", b) + } + gotData = true + case BackupAlternateData: + if gotAltData { + t.Fatal("duplicate alt data") + } + if hdr.Name != ":ads.txt:$DATA" { + t.Fatalf("incorrect name %s", hdr.Name) + } + b, err := ioutil.ReadAll(br) + if err != nil { + t.Fatal(err) + } + if string(b) != "alternate data stream\n" { + t.Fatalf("incorrect data %v", b) + } + gotAltData = true + default: + t.Fatalf("unknown stream ID %d", hdr.Id) + } + } + if !gotData || !gotAltData { + t.Fatal("missing stream") + } +} + +func TestBackupStreamWrite(t *testing.T) { + f, err := os.Create(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + w := NewBackupFileWriter(f, false) + defer w.Close() + + data := "testing 1 2 3\n" + altData := "alternate stream\n" + + br := NewBackupStreamWriter(w) + err = br.WriteHeader(&BackupHeader{Id: BackupData, Size: int64(len(data))}) + if err != nil { + t.Fatal(err) + } + n, err := br.Write([]byte(data)) + if err != nil { 
+ t.Fatal(err) + } + if n != len(data) { + t.Fatal("short write") + } + + err = br.WriteHeader(&BackupHeader{Id: BackupAlternateData, Size: int64(len(altData)), Name: ":ads.txt:$DATA"}) + if err != nil { + t.Fatal(err) + } + n, err = br.Write([]byte(altData)) + if err != nil { + t.Fatal(err) + } + if n != len(altData) { + t.Fatal("short write") + } + + f.Close() + + b, err := ioutil.ReadFile(testFileName) + if err != nil { + t.Fatal(err) + } + if string(b) != data { + t.Fatalf("wrong data %v", b) + } + + b, err = ioutil.ReadFile(testFileName + ":ads.txt") + if err != nil { + t.Fatal(err) + } + if string(b) != altData { + t.Fatalf("wrong data %v", b) + } +} + +func makeSparseFile() error { + os.Remove(testFileName) + f, err := os.Create(testFileName) + if err != nil { + return err + } + defer f.Close() + + const ( + FSCTL_SET_SPARSE = 0x000900c4 + FSCTL_SET_ZERO_DATA = 0x000980c8 + ) + + err = syscall.DeviceIoControl(syscall.Handle(f.Fd()), FSCTL_SET_SPARSE, nil, 0, nil, 0, nil, nil) + if err != nil { + return err + } + + _, err = f.Write([]byte("testing 1 2 3\n")) + if err != nil { + return err + } + + _, err = f.Seek(1000000, 0) + if err != nil { + return err + } + + _, err = f.Write([]byte("more data later\n")) + if err != nil { + return err + } + + return nil +} + +func TestBackupSparseFile(t *testing.T) { + err := makeSparseFile() + if err != nil { + t.Fatal(err) + } + + f, err := os.Open(testFileName) + if err != nil { + t.Fatal(err) + } + defer f.Close() + r := NewBackupFileReader(f, false) + defer r.Close() + + br := NewBackupStreamReader(r) + for { + hdr, err := br.Next() + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + + t.Log(hdr) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 0000000000..b37e930d6a --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
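+//
+// For example (see ea_test.go), a buffer holding the single EA "foo" => "bar"
+// decodes to []ExtendedAttribute{{Name: "foo", Value: []byte("bar")}}.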
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea_test.go b/vendor/github.com/Microsoft/go-winio/ea_test.go new file mode 100644 index 0000000000..92d9d45727 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea_test.go @@ -0,0 +1,89 @@ +package winio + +import ( + "io/ioutil" + "os" + "reflect" + "syscall" + "testing" + "unsafe" +) + +var ( + testEas = []ExtendedAttribute{ + {Name: "foo", Value: []byte("bar")}, + {Name: "fizz", Value: []byte("buzz")}, + } + + testEasEncoded = []byte{16, 0, 0, 0, 0, 3, 3, 0, 102, 111, 111, 0, 98, 97, 114, 0, 0, 0, 0, 0, 0, 4, 4, 0, 102, 105, 122, 122, 0, 98, 117, 122, 122, 0, 0, 0} + testEasNotPadded = testEasEncoded[0 : len(testEasEncoded)-3] + testEasTruncated = testEasEncoded[0:20] +) + +func Test_RoundTripEas(t *testing.T) { + b, err := EncodeExtendedAttributes(testEas) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEasEncoded, b) { + t.Fatalf("encoded mismatch %v %v", testEasEncoded, b) + } + eas, err := DecodeExtendedAttributes(b) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func Test_EasDontNeedPaddingAtEnd(t *testing.T) { + eas, err := DecodeExtendedAttributes(testEasNotPadded) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(testEas, eas) { + t.Fatalf("mismatch %+v %+v", testEas, eas) + } +} + +func Test_TruncatedEasFailCorrectly(t *testing.T) { + _, err := DecodeExtendedAttributes(testEasTruncated) + if err == nil { + t.Fatal("expected error") + } +} + +func Test_NilEasEncodeAndDecodeAsNil(t *testing.T) { + b, err := EncodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(b) != 0 { + t.Fatal("expected empty") + } + eas, err := DecodeExtendedAttributes(nil) + if err != nil { + t.Fatal(err) + } + if len(eas) != 0 { + t.Fatal("expected empty") + } +} + +// Test_SetFileEa makes sure 
that the test buffer is actually parsable by NtSetEaFile. +func Test_SetFileEa(t *testing.T) { + f, err := ioutil.TempFile("", "winio") + if err != nil { + t.Fatal(err) + } + defer os.Remove(f.Name()) + defer f.Close() + ntdll := syscall.MustLoadDLL("ntdll.dll") + ntSetEaFile := ntdll.MustFindProc("NtSetEaFile") + var iosb [2]uintptr + r, _, _ := ntSetEaFile.Call(f.Fd(), uintptr(unsafe.Pointer(&iosb[0])), uintptr(unsafe.Pointer(&testEasEncoded[0])), uintptr(len(testEasEncoded))) + if r != 0 { + t.Fatalf("NtSetEaFile failed with %08x", r) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 0000000000..57ac3696a9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,310 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. 
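+//
+// Deadline semantics follow net.Conn: per deadlineHandler.set below, a zero
+// time clears the deadline and a time in the past fails IO immediately with
+// ErrTimeout.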
+type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + return makeWin32File(h) +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + // Set the timer resolution to 1. This fixes a performance regression in golang 1.6. + timeBeginPeriod(1) + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. +func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. 
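+// Per the EOF handling below, a successful zero-byte completion on a
+// non-empty buffer and ERROR_BROKEN_PIPE are both reported as io.EOF.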
+func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. + timeoutIO() + } + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go new file mode 100644 index 0000000000..b1d60abb83 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -0,0 +1,60 @@ +// +build windows + +package winio + +import ( + "os" + "runtime" + "syscall" + "unsafe" +) + +//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx +//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle + +const ( + fileBasicInfo = 0 + fileIDInfo = 0x12 +) + +// FileBasicInfo contains file access time and file attributes information. +type FileBasicInfo struct { + CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime + FileAttributes uintptr // includes padding +} + +// GetFileBasicInfo retrieves times and attributes for a file. +func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { + bi := &FileBasicInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return bi, nil +} + +// SetFileBasicInfo sets times and attributes for a file. 
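+// A typical round-trip (an illustrative sketch only; the attribute flag is arbitrary) reads the current values, adjusts a field, and writes them back: +// +// bi, err := GetFileBasicInfo(f) +// if err != nil { return err } +// bi.FileAttributes |= syscall.FILE_ATTRIBUTE_HIDDEN +// return SetFileBasicInfo(f, bi)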
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { + if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { + return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return nil +} + +// FileIDInfo contains the volume serial number and file ID for a file. This pair should be +// unique on a system. +type FileIDInfo struct { + VolumeSerialNumber uint64 + FileID [16]byte +} + +// GetFileID retrieves the unique (volume, file ID) pair for a file. +func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 0000000000..44340b8167 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,404 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cPIPE_ACCESS_DUPLEX = 0x3 + cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_REJECT_REMOTE_CLIENTS = 0x8 + + cPIPE_UNLIMITED_INSTANCES = 255 + + cNMPWAIT_USE_DEFAULT_WAIT = 0 + cNMPWAIT_NOWAIT = 1 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. 
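+ // (That text is currently "use of closed network connection", so the literal below must be kept in sync with it.)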
+ ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then the timeout +// is the default timeout established by the pipe server. 
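+// For example (an illustrative sketch; the pipe name and timeout are arbitrary): +// +// d := 5 * time.Second +// conn, err := DialPipe(`\\.\pipe\mypipe`, &d) +// if err != nil { return err } +// defer conn.Close()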
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } + var err error + var h syscall.Handle + for { + h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != cERROR_PIPE_BUSY { + break + } + now := time.Now() + var ms uint32 + if absTimeout.IsZero() { + ms = cNMPWAIT_USE_DEFAULT_WAIT + } else if now.After(absTimeout) { + ms = cNMPWAIT_NOWAIT + } else { + ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) + } + err = waitNamedPipe(path, ms) + if err != nil { + if err == cERROR_SEM_TIMEOUT { + return nil, ErrTimeout + } + break + } + } + if err != nil { + return nil, &os.PathError{Op: "open", Path: path, Err: err} + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + var state uint32 + err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) + if err != nil { + return nil, err + } + + if state&cPIPE_READMODE_MESSAGE != 0 { + return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + securityDescriptor []byte + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED + if first { + flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE + } + + var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS + if c.MessageMode { + mode |= cPIPE_TYPE_MESSAGE + } + + sa := &syscall.SecurityAttributes{} + sa.Length = uint32(unsafe.Sizeof(*sa)) + if securityDescriptor != nil { + len := uint32(len(securityDescriptor)) + sa.SecurityDescriptor = localAlloc(0, len) + defer localFree(sa.SecurityDescriptor) + copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) + } + h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + p, err := l.makeServerPipe() + if err == nil { + // Wait for the client to connect. 
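+ // ConnectNamedPipe is issued from a separate goroutine so that this loop can keep watching closeCh; closing the pipe handle is the only way to abort the pending connect, which is exactly what the closeCh case below does.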
+ ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + closed = true + } + } + responseCh <- acceptResponse{p, err} + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contains configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. +func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { + var ( + sd []byte + err error + ) + if c == nil { + c = &PipeConfig{} + } + if c.SecurityDescriptor != "" { + sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) + if err != nil { + return nil, err + } + } + h, err := makeServerPipeHandle(path, sd, c, true) + if err != nil { + return nil, err + } + // Immediately open and then close a client handle so that the named pipe is + // created but not currently accepting connections.
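+ // (Connecting and immediately closing this transient client takes the first instance out of its listening state, so subsequent DialPipe callers get ERROR_PIPE_BUSY and retry instead of connecting to an instance that Accept has not yet handed out.)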
+ h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err != nil { + syscall.Close(h) + return nil, err + } + syscall.Close(h2) + l := &win32PipeListener{ + firstHandle: h, + path: path, + securityDescriptor: sd, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), + } + go l.listenerRoutine() + return l, nil +} + +func connectPipe(p *win32File) error { + c, err := p.prepareIo() + if err != nil { + return err + } + defer p.wg.Done() + + err = connectNamedPipe(p.handle, &c.o) + _, err = p.asyncIo(c, nil, 0, err) + if err != nil && err != cERROR_PIPE_CONNECTED { + return err + } + return nil +} + +func (l *win32PipeListener) Accept() (net.Conn, error) { + ch := make(chan acceptResponse) + select { + case l.acceptCh <- ch: + response := <-ch + err := response.err + if err != nil { + return nil, err + } + if l.config.MessageMode { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: response.f, path: l.path}, + }, nil + } + return &win32Pipe{win32File: response.f, path: l.path}, nil + case <-l.doneCh: + return nil, ErrPipeListenerClosed + } +} + +func (l *win32PipeListener) Close() error { + select { + case l.closeCh <- 1: + <-l.doneCh + case <-l.doneCh: + } + return nil +} + +func (l *win32PipeListener) Addr() net.Addr { + return pipeAddress(l.path) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe_test.go b/vendor/github.com/Microsoft/go-winio/pipe_test.go new file mode 100644 index 0000000000..3bc02dae95 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe_test.go @@ -0,0 +1,424 @@ +package winio + +import ( + "bufio" + "io" + "net" + "os" + "syscall" + "testing" + "time" +) + +var testPipeName = `\\.\pipe\winiotestpipe` + +var aLongTimeAgo = time.Unix(1, 0) + +func TestDialUnknownFailsImmediately(t *testing.T) { + _, err := DialPipe(testPipeName, nil) + if err.(*os.PathError).Err != syscall.ENOENT { + t.Fatalf("expected ENOENT got %v", err) + } +} + +func TestDialListenerTimesOut(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + var d = time.Duration(10 * time.Millisecond) + _, err = DialPipe(testPipeName, &d) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func TestDialAccessDeniedWithRestrictedSD(t *testing.T) { + c := PipeConfig{ + SecurityDescriptor: "D:P(A;;0x1200FF;;;WD)", + } + l, err := ListenPipe(testPipeName, &c) + if err != nil { + t.Fatal(err) + } + defer l.Close() + _, err = DialPipe(testPipeName, nil) + if err.(*os.PathError).Err != syscall.ERROR_ACCESS_DENIED { + t.Fatalf("expected ERROR_ACCESS_DENIED, got %v", err) + } +} + +func getConnection(cfg *PipeConfig) (client net.Conn, server net.Conn, err error) { + l, err := ListenPipe(testPipeName, cfg) + if err != nil { + return + } + defer l.Close() + + type response struct { + c net.Conn + err error + } + ch := make(chan response) + go func() { + c, err := l.Accept() + ch <- response{c, err} + }() + + c, err := DialPipe(testPipeName, nil) + if err != nil { + return + } + + r := <-ch + if err = r.err; err != nil { + c.Close() + return + } + + client = c + server = r.c + return +} + +func TestReadTimeout(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + + c.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) + + buf := make([]byte, 10) + _, err = c.Read(buf) + if err != ErrTimeout { + t.Fatalf("expected 
ErrTimeout, got %v", err) + } +} + +func server(l net.Listener, ch chan int) { + c, err := l.Accept() + if err != nil { + panic(err) + } + rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) + s, err := rw.ReadString('\n') + if err != nil { + panic(err) + } + _, err = rw.WriteString("got " + s) + if err != nil { + panic(err) + } + err = rw.Flush() + if err != nil { + panic(err) + } + c.Close() + ch <- 1 +} + +func TestFullListenDialReadWrite(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + ch := make(chan int) + go server(l, ch) + + c, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + rw := bufio.NewReadWriter(bufio.NewReader(c), bufio.NewWriter(c)) + _, err = rw.WriteString("hello world\n") + if err != nil { + t.Fatal(err) + } + err = rw.Flush() + if err != nil { + t.Fatal(err) + } + + s, err := rw.ReadString('\n') + if err != nil { + t.Fatal(err) + } + ms := "got hello world\n" + if s != ms { + t.Errorf("expected '%s', got '%s'", ms, s) + } + + <-ch +} + +func TestCloseAbortsListen(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + + ch := make(chan error) + go func() { + _, err := l.Accept() + ch <- err + }() + + time.Sleep(30 * time.Millisecond) + l.Close() + + err = <-ch + if err != ErrPipeListenerClosed { + t.Fatalf("expected ErrPipeListenerClosed, got %v", err) + } +} + +func ensureEOFOnClose(t *testing.T, r io.Reader, w io.Closer) { + b := make([]byte, 10) + w.Close() + n, err := r.Read(b) + if n > 0 { + t.Errorf("unexpected byte count %d", n) + } + if err != io.EOF { + t.Errorf("expected EOF: %v", err) + } +} + +func TestCloseClientEOFServer(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + ensureEOFOnClose(t, c, s) +} + +func TestCloseServerEOFClient(t *testing.T) { + c, s, err := getConnection(nil) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + ensureEOFOnClose(t, s, c) +} + +func TestCloseWriteEOF(t *testing.T) { + cfg := &PipeConfig{ + MessageMode: true, + } + c, s, err := getConnection(cfg) + if err != nil { + t.Fatal(err) + } + defer c.Close() + defer s.Close() + + type closeWriter interface { + CloseWrite() error + } + + err = c.(closeWriter).CloseWrite() + if err != nil { + t.Fatal(err) + } + + b := make([]byte, 10) + _, err = s.Read(b) + if err != io.EOF { + t.Fatal(err) + } +} + +func TestAcceptAfterCloseFails(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + l.Close() + _, err = l.Accept() + if err != ErrPipeListenerClosed { + t.Fatalf("expected ErrPipeListenerClosed, got %v", err) + } +} + +func TestDialTimesOutByDefault(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + _, err = DialPipe(testPipeName, nil) + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } +} + +func TestTimeoutPendingRead(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + serverDone := make(chan struct{}) + + go func() { + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + s.Close() + close(serverDone) + }() + + client, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + clientErr := make(chan error) + go func() { + buf := make([]byte, 10) + _, err 
= client.Read(buf) + clientErr <- err + }() + + time.Sleep(100 * time.Millisecond) // make *sure* the pipe is reading before we set the deadline + client.SetReadDeadline(aLongTimeAgo) + + select { + case err = <-clientErr: + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out while waiting for read to cancel") + <-clientErr + } + <-serverDone +} + +func TestTimeoutPendingWrite(t *testing.T) { + l, err := ListenPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + serverDone := make(chan struct{}) + + go func() { + s, err := l.Accept() + if err != nil { + t.Fatal(err) + } + time.Sleep(1 * time.Second) + s.Close() + close(serverDone) + }() + + client, err := DialPipe(testPipeName, nil) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + clientErr := make(chan error) + go func() { + _, err = client.Write([]byte("this should timeout")) + clientErr <- err + }() + + time.Sleep(100 * time.Millisecond) // make *sure* the pipe is writing before we set the deadline + client.SetWriteDeadline(aLongTimeAgo) + + select { + case err = <-clientErr: + if err != ErrTimeout { + t.Fatalf("expected ErrTimeout, got %v", err) + } + case <-time.After(100 * time.Millisecond): + t.Fatalf("timed out while waiting for write to cancel") + <-clientErr + } + <-serverDone +} + +type CloseWriter interface { + CloseWrite() error +} + +func TestEchoWithMessaging(t *testing.T) { + c := PipeConfig{ + MessageMode: true, // Use message mode so that CloseWrite() is supported + InputBufferSize: 65536, // Use 64KB buffers to improve performance + OutputBufferSize: 65536, + } + l, err := ListenPipe(testPipeName, &c) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + listenerDone := make(chan bool) + clientDone := make(chan bool) + go func() { + // server echo + conn, e := l.Accept() + if e != nil { + t.Fatal(e) + } + defer conn.Close() + + time.Sleep(500 * time.Millisecond) // make *sure* we don't begin to read before eof signal is sent + io.Copy(conn, conn) + conn.(CloseWriter).CloseWrite() + close(listenerDone) + }() + timeout := 1 * time.Second + client, err := DialPipe(testPipeName, &timeout) + if err != nil { + t.Fatal(err) + } + defer client.Close() + + go func() { + // client read back + bytes := make([]byte, 2) + n, e := client.Read(bytes) + if e != nil { + t.Fatal(e) + } + if n != 2 { + t.Fatalf("expected 2 bytes, got %v", n) + } + close(clientDone) + }() + + payload := make([]byte, 2) + payload[0] = 0 + payload[1] = 1 + + n, err := client.Write(payload) + if err != nil { + t.Fatal(err) + } + if n != 2 { + t.Fatalf("expected 2 bytes, got %v", n) + } + client.(CloseWriter).CloseWrite() + <-listenerDone + <-clientDone +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 0000000000..9c83d36fe5 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,202 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys 
openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. +func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. 
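+// A minimal sketch of the enable/disable pair (SeBackupPrivilege is declared above): +// +// if err := EnableProcessPrivileges([]string{SeBackupPrivilege}); err != nil { return err } +// defer DisableProcessPrivileges([]string{SeBackupPrivilege})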
+func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + + p, _ := windows.GetCurrentProcess() + var token windows.Token + err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token) + if err != nil { + return err + } + + defer token.Close() + return adjustPrivileges(token, privileges, action) +} + +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) + for _, p := range privileges { + binary.Write(&b, binary.LittleEndian, p) + binary.Write(&b, binary.LittleEndian, action) + } + prevState := make([]byte, b.Len()) + reqSize := uint32(0) + success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize) + if !success { + return err + } + if err == ERROR_NOT_ALL_ASSIGNED { + return &PrivilegeError{privileges} + } + return nil +} + +func getPrivilegeName(luid uint64) string { + var nameBuffer [256]uint16 + bufSize := uint32(len(nameBuffer)) + err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize) + if err != nil { + return fmt.Sprintf("<unknown privilege %d>", luid) + } + + var displayNameBuffer [256]uint16 + displayBufSize := uint32(len(displayNameBuffer)) + var langID uint32 + err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID) + if err != nil { + return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize]))) + } + + return string(utf16.Decode(displayNameBuffer[:displayBufSize])) +} + +func newThreadToken() (windows.Token, error) { + err := impersonateSelf(securityImpersonation) + if err != nil { + return 0, err + } + + var token windows.Token + err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token) + if err != nil { + rerr := revertToSelf() + if rerr != nil { + panic(rerr) + } + return 0, err + } + return token, nil +} + +func releaseThreadToken(h windows.Token) { + err := revertToSelf() + if err != nil { + panic(err) + } + h.Close() +} diff --git a/vendor/github.com/Microsoft/go-winio/privileges_test.go b/vendor/github.com/Microsoft/go-winio/privileges_test.go new file mode 100644 index 0000000000..5e94c48c23 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privileges_test.go @@ -0,0 +1,17 @@ +package winio + +import "testing" + +func TestRunWithUnavailablePrivilege(t *testing.T) { + err := RunWithPrivilege("SeCreateTokenPrivilege", func() error { return nil }) + if _, ok := err.(*PrivilegeError); err == nil || !ok { + t.Fatal("expected PrivilegeError") + } +} + +func TestRunWithPrivileges(t *testing.T) { + err := RunWithPrivilege("SeShutdownPrivilege", func() error { return nil }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go new file mode 100644 index 0000000000..fc1ee4d3a3 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/reparse.go @@ -0,0 +1,128 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "strings" + "unicode/utf16" + "unsafe" +) + +const ( + reparseTagMountPoint = 0xA0000003 + reparseTagSymlink = 0xA000000C +) + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + SubstituteNameOffset uint16 +
SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 +} + +// ReparsePoint describes a Win32 symlink or mount point. +type ReparsePoint struct { + Target string + IsMountPoint bool +} + +// UnsupportedReparsePointError is returned when trying to decode a non-symlink or +// mount point reparse point. +type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
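+ // (The *NameLength fields below exclude that terminator, which is why they are computed as (len-1)*2; all offsets and lengths are byte counts, so UTF-16 code unit counts are doubled.)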
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 0000000000..db1b370a1b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = 
syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:]) + localFree(uintptr(unsafe.Pointer(strBuffer))) + return sid, nil +} + +func SddlToSecurityDescriptor(sddl string) ([]byte, error) { + var sdBuffer uintptr + err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil) + if err != nil { + return nil, &SddlConversionError{sddl, err} + } + defer localFree(sdBuffer) + sd := make([]byte, getSecurityDescriptorLength(sdBuffer)) + copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)]) + return sd, nil +} + +func SecurityDescriptorToSddl(sd []byte) (string, error) { + var sddl *uint16 + // The returned string length seems to include an arbitrary number of terminating NULs. + // Don't use it. + err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil) + if err != nil { + return "", err + } + defer localFree(uintptr(unsafe.Pointer(sddl))) + return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/sd_test.go b/vendor/github.com/Microsoft/go-winio/sd_test.go new file mode 100644 index 0000000000..847db3c162 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd_test.go @@ -0,0 +1,26 @@ +package winio + +import "testing" + +func TestLookupInvalidSid(t *testing.T) { + _, err := LookupSidByName(".\\weoifjdsklfj") + aerr, ok := err.(*AccountLookupError) + if !ok || aerr.Err != cERROR_NONE_MAPPED { + t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) + } +} + +func TestLookupValidSid(t *testing.T) { + sid, err := LookupSidByName("Everyone") + if err != nil || sid != "S-1-1-0" { + t.Fatalf("expected S-1-1-0, got %s, %s", sid, err) + } +} + +func TestLookupEmptyNameFails(t *testing.T) { + _, err := LookupSidByName("") + aerr, ok := err.(*AccountLookupError) + if !ok || aerr.Err != cERROR_NONE_MAPPED { + t.Fatalf("expected AccountLookupError with ERROR_NONE_MAPPED, got %s", err) + } +} diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go new file mode 100644 index 0000000000..20d64cf41d --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/syscall.go @@ -0,0 +1,3 @@ +package winio + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go new file mode 100644 index 0000000000..4f7a52eeb7 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go @@ -0,0 +1,528 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package winio + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values seen on Windows. (perhaps when running + // all.bat?)
+ return e +} + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modwinmm = windows.NewLazySystemDLL("winmm.dll") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procLocalFree = modkernel32.NewProc("LocalFree") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") +) + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return 
+} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func timeBeginPeriod(period uint32) (n int32) { + r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) + n = int32(r0) + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func waitNamedPipe(name string, timeout uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _waitNamedPipe(_p0, timeout) +} + +func _waitNamedPipe(name *uint16, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), 
uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func localFree(mem uintptr) { + 
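+ // The //sys declaration for localFree carries no results, so LocalFree's return value (NULL on success) is deliberately discarded.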
syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return 
_lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } else { + _p1 = 0 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } else { + _p2 = 0 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore new file mode 100644 index 0000000000..3591f9ff30 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +*.test + +# Folders +_obj +_test +.vagrant + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml new file mode 100644 index 0000000000..4ca4870a8b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/.travis.yml @@ -0,0 +1,33 @@ +language: go +go: +- 1.5.4 +- 1.6.2 + +env: + global: + - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095 + - TOXIPROXY_ADDR=http://localhost:8474 + - 
KAFKA_INSTALL_ROOT=/home/travis/kafka + - KAFKA_HOSTNAME=localhost + - DEBUG=true + matrix: + - KAFKA_VERSION=0.8.2.2 + - KAFKA_VERSION=0.9.0.1 + - KAFKA_VERSION=0.10.0.0 + +before_install: +- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR} +- vagrant/install_cluster.sh +- vagrant/boot_cluster.sh +- vagrant/create_topics.sh + +install: +- make install_dependencies + +script: +- make test +- make vet +- make errcheck +- make fmt + +sudo: false diff --git a/vendor/github.com/Shopify/sarama/api_versions_request_test.go b/vendor/github.com/Shopify/sarama/api_versions_request_test.go new file mode 100644 index 0000000000..5ab4fa71c9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_request_test.go @@ -0,0 +1,14 @@ +package sarama + +import "testing" + +var ( + apiVersionRequest = []byte{} +) + +func TestApiVersionsRequest(t *testing.T) { + var request *ApiVersionsRequest + + request = new(ApiVersionsRequest) + testRequest(t, "basic", request, apiVersionRequest) +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response_test.go b/vendor/github.com/Shopify/sarama/api_versions_response_test.go new file mode 100644 index 0000000000..675a65a7d0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_response_test.go @@ -0,0 +1,32 @@ +package sarama + +import "testing" + +var ( + apiVersionResponse = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x03, + 0x00, 0x02, + 0x00, 0x01, + } +) + +func TestApiVersionsResponse(t *testing.T) { + var response *ApiVersionsResponse + + response = new(ApiVersionsResponse) + testVersionDecodable(t, "no error", response, apiVersionResponse, 0) + if response.Err != ErrNoError { + t.Error("Decoding error failed: no error expected but found", response.Err) + } + if response.ApiVersions[0].ApiKey != 0x03 { + t.Error("Decoding error: expected 0x03 but got", response.ApiVersions[0].ApiKey) + } + if response.ApiVersions[0].MinVersion != 0x02 { + t.Error("Decoding error: expected 0x02 but got", response.ApiVersions[0].MinVersion) + } + if response.ApiVersions[0].MaxVersion != 0x01 { + t.Error("Decoding error: expected 0x01 but got", response.ApiVersions[0].MaxVersion) + } +} diff --git a/vendor/github.com/Shopify/sarama/async_producer_test.go b/vendor/github.com/Shopify/sarama/async_producer_test.go new file mode 100644 index 0000000000..517ef2a348 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/async_producer_test.go @@ -0,0 +1,801 @@ +package sarama + +import ( + "errors" + "log" + "os" + "os/signal" + "sync" + "testing" + "time" +) + +const TestMessage = "ABC THE MESSAGE" + +func closeProducer(t *testing.T, p AsyncProducer) { + var wg sync.WaitGroup + p.AsyncClose() + + wg.Add(2) + go func() { + for _ = range p.Successes() { + t.Error("Unexpected message on Successes()") + } + wg.Done() + }() + go func() { + for msg := range p.Errors() { + t.Error(msg.Err) + } + wg.Done() + }() + wg.Wait() +} + +func expectResults(t *testing.T, p AsyncProducer, successes, errors int) { + expect := successes + errors + for expect > 0 { + select { + case msg := <-p.Errors(): + if msg.Msg.flags != 0 { + t.Error("Message had flags set") + } + errors-- + expect-- + if errors < 0 { + t.Error(msg.Err) + } + case msg := <-p.Successes(): + if msg.flags != 0 { + t.Error("Message had flags set") + } + successes-- + expect-- + if successes < 0 { + t.Error("Too many successes") + } + } + } + if successes != 0 || errors != 0 { + t.Error("Unexpected successes", successes, "or errors", errors) + } +} + +type testPartitioner chan *int32 + +func (p 
testPartitioner) Partition(msg *ProducerMessage, numPartitions int32) (int32, error) { + part := <-p + if part == nil { + return 0, errors.New("BOOM") + } + + return *part, nil +} + +func (p testPartitioner) RequiresConsistency() bool { + return true +} + +func (p testPartitioner) feed(partition int32) { + p <- &partition +} + +type flakyEncoder bool + +func (f flakyEncoder) Length() int { + return len(TestMessage) +} + +func (f flakyEncoder) Encode() ([]byte, error) { + if !bool(f) { + return nil, errors.New("flaky encoding error") + } + return []byte(TestMessage), nil +} + +func TestAsyncProducer(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Metadata: i} + } + for i := 0; i < 10; i++ { + select { + case msg := <-producer.Errors(): + t.Error(msg.Err) + if msg.Msg.flags != 0 { + t.Error("Message had flags set") + } + case msg := <-producer.Successes(): + if msg.flags != 0 { + t.Error("Message had flags set") + } + if msg.Metadata.(int) != i { + t.Error("Message metadata did not match") + } + } + } + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestAsyncProducerMultipleFlushes(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + leader.Returns(prodSuccess) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 5 + config.Producer.Return.Successes = true + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for flush := 0; flush < 3; flush++ { + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + expectResults(t, producer, 5, 0) + } + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestAsyncProducerMultipleBrokers(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader0 := NewMockBroker(t, 2) + leader1 := NewMockBroker(t, 3) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader0.Addr(), leader0.BrokerID()) + metadataResponse.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader0.BrokerID(), nil, nil, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodResponse0 := new(ProduceResponse) + prodResponse0.AddTopicPartition("my_topic", 0, 
ErrNoError) + leader0.Returns(prodResponse0) + + prodResponse1 := new(ProduceResponse) + prodResponse1.AddTopicPartition("my_topic", 1, ErrNoError) + leader1.Returns(prodResponse1) + + config := NewConfig() + config.Producer.Flush.Messages = 5 + config.Producer.Return.Successes = true + config.Producer.Partitioner = NewRoundRobinPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + expectResults(t, producer, 10, 0) + + closeProducer(t, producer) + leader1.Close() + leader0.Close() + seedBroker.Close() +} + +func TestAsyncProducerCustomPartitioner(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodResponse := new(ProduceResponse) + prodResponse.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodResponse) + + config := NewConfig() + config.Producer.Flush.Messages = 2 + config.Producer.Return.Successes = true + config.Producer.Partitioner = func(topic string) Partitioner { + p := make(testPartitioner) + go func() { + p.feed(0) + p <- nil + p <- nil + p <- nil + p.feed(0) + }() + return p + } + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + expectResults(t, producer, 2, 3) + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestAsyncProducerFailureRetry(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader1 := NewMockBroker(t, 2) + leader2 := NewMockBroker(t, 3) + + metadataLeader1 := new(MetadataResponse) + metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader1) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + seedBroker.Close() + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader1.Returns(prodNotLeader) + + metadataLeader2 := new(MetadataResponse) + metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) + metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) + leader1.Returns(metadataLeader2) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + leader1.Close() + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + leader2.Close() + closeProducer(t, producer) +} + +func TestAsyncProducerEncoderFailures(t *testing.T) { + 
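// flakyEncoder(false) fails to encode and flakyEncoder(true) succeeds, so of the three messages sent per flush below only the (true, true) one survives encoding: expect one success and two errors per flush. +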
seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + leader.Returns(prodSuccess) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 1 + config.Producer.Return.Successes = true + config.Producer.Partitioner = NewManualPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for flush := 0; flush < 3; flush++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(false)} + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(false), Value: flakyEncoder(true)} + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: flakyEncoder(true), Value: flakyEncoder(true)} + expectResults(t, producer, 1, 2) + } + + closeProducer(t, producer) + leader.Close() + seedBroker.Close() +} + +// If a Kafka broker becomes unavailable and then comes back into service, the +// producer reconnects to it and continues sending messages. +func TestAsyncProducerBrokerBounce(t *testing.T) { + // Given + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + leaderAddr := leader.Addr() + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + + config := NewConfig() + config.Producer.Flush.Messages = 1 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // When: a broker connection gets reset by a broker (network glitch, restart, you name it). + leader.Close() // producer should get EOF + leader = NewMockBrokerAddr(t, 2, leaderAddr) // start it up again right away for giggles + seedBroker.Returns(metadataResponse) // tell it to go to broker 2 again + + // Then: a produced message goes through the new broker connection.
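+ // (the EOF triggers a retry, which consumes the queued seed-broker metadata response before re-sending)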
+ producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + closeProducer(t, producer) + seedBroker.Close() + leader.Close() +} + +func TestAsyncProducerBrokerBounceWithStaleMetadata(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader1 := NewMockBroker(t, 2) + leader2 := NewMockBroker(t, 3) + + metadataLeader1 := new(MetadataResponse) + metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader1) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Max = 3 + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + leader1.Close() // producer should get EOF + seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down + seedBroker.Returns(metadataLeader1) // tell it to go to leader1 again even though it's still down + + // ok fine, tell it to go to leader2 finally + metadataLeader2 := new(MetadataResponse) + metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) + metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader2) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + seedBroker.Close() + leader2.Close() + + closeProducer(t, producer) +} + +func TestAsyncProducerMultipleRetries(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader1 := NewMockBroker(t, 2) + leader2 := NewMockBroker(t, 3) + + metadataLeader1 := new(MetadataResponse) + metadataLeader1.AddBroker(leader1.Addr(), leader1.BrokerID()) + metadataLeader1.AddTopicPartition("my_topic", 0, leader1.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader1) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Max = 4 + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader1.Returns(prodNotLeader) + + metadataLeader2 := new(MetadataResponse) + metadataLeader2.AddBroker(leader2.Addr(), leader2.BrokerID()) + metadataLeader2.AddTopicPartition("my_topic", 0, leader2.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader2) + leader2.Returns(prodNotLeader) + seedBroker.Returns(metadataLeader1) + leader1.Returns(prodNotLeader) + seedBroker.Returns(metadataLeader1) + leader1.Returns(prodNotLeader) + seedBroker.Returns(metadataLeader2) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: 
StringEncoder(TestMessage)} + } + leader2.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + seedBroker.Close() + leader1.Close() + leader2.Close() + closeProducer(t, producer) +} + +func TestAsyncProducerOutOfRetries(t *testing.T) { + t.Skip("Enable once bug #294 is fixed.") + + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + config.Producer.Retry.Max = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader.Returns(prodNotLeader) + + for i := 0; i < 10; i++ { + select { + case msg := <-producer.Errors(): + if msg.Err != ErrNotLeaderForPartition { + t.Error(msg.Err) + } + case <-producer.Successes(): + t.Error("Unexpected success") + } + } + + seedBroker.Returns(metadataResponse) + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + + expectResults(t, producer, 10, 0) + + leader.Close() + seedBroker.Close() + safeClose(t, producer) +} + +func TestAsyncProducerRetryWithReferenceOpen(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + leaderAddr := leader.Addr() + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leaderAddr, leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + config.Producer.Retry.Max = 1 + config.Producer.Partitioner = NewRoundRobinPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + // prime partition 0 + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // prime partition 1 + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + prodSuccess = new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 1, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // reboot the broker (the producer will get EOF on its existing connection) + leader.Close() + leader = NewMockBrokerAddr(t, 2, leaderAddr) + + // send another message on partition 0 to trigger the EOF and retry + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + + // tell partition 0 to go to that broker again + 
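// (partition 1 still holds a reference to the old broker object while partition 0 retries, which is the situation this test exercises) +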
seedBroker.Returns(metadataResponse) + + // succeed this time + prodSuccess = new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 1, 0) + + // shutdown + closeProducer(t, producer) + seedBroker.Close() + leader.Close() +} + +func TestAsyncProducerFlusherRetryCondition(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Producer.Flush.Messages = 5 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + config.Producer.Retry.Max = 1 + config.Producer.Partitioner = NewManualPartitioner + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + // prime partitions + for p := int32(0); p < 2; p++ { + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: p} + } + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", p, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 5, 0) + } + + // send more messages on partition 0 + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} + } + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader.Returns(prodNotLeader) + + time.Sleep(50 * time.Millisecond) + + leader.SetHandlerByMap(map[string]MockResponse{ + "ProduceRequest": NewMockProduceResponse(t). 
+ SetError("my_topic", 0, ErrNoError), + }) + + // tell partition 0 to go to that broker again + seedBroker.Returns(metadataResponse) + + // succeed this time + expectResults(t, producer, 5, 0) + + // put five more through + for i := 0; i < 5; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage), Partition: 0} + } + expectResults(t, producer, 5, 0) + + // shutdown + closeProducer(t, producer) + seedBroker.Close() + leader.Close() +} + +func TestAsyncProducerRetryShutdown(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataLeader := new(MetadataResponse) + metadataLeader.AddBroker(leader.Addr(), leader.BrokerID()) + metadataLeader.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataLeader) + + config := NewConfig() + config.Producer.Flush.Messages = 10 + config.Producer.Return.Successes = true + config.Producer.Retry.Backoff = 0 + producer, err := NewAsyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder(TestMessage)} + } + producer.AsyncClose() + time.Sleep(5 * time.Millisecond) // let the shutdown goroutine kick in + + producer.Input() <- &ProducerMessage{Topic: "FOO"} + if err := <-producer.Errors(); err.Err != ErrShuttingDown { + t.Error(err) + } + + prodNotLeader := new(ProduceResponse) + prodNotLeader.AddTopicPartition("my_topic", 0, ErrNotLeaderForPartition) + leader.Returns(prodNotLeader) + + seedBroker.Returns(metadataLeader) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + expectResults(t, producer, 10, 0) + + seedBroker.Close() + leader.Close() + + // wait for the async-closed producer to shut down fully + for err := range producer.Errors() { + t.Error(err) + } +} + +// This example shows how to use the producer while simultaneously +// reading the Errors channel to know about any failures. +func ExampleAsyncProducer_select() { + producer, err := NewAsyncProducer([]string{"localhost:9092"}, nil) + if err != nil { + panic(err) + } + + defer func() { + if err := producer.Close(); err != nil { + log.Fatalln(err) + } + }() + + // Trap SIGINT to trigger a shutdown. + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + var enqueued, errors int +ProducerLoop: + for { + select { + case producer.Input() <- &ProducerMessage{Topic: "my_topic", Key: nil, Value: StringEncoder("testing 123")}: + enqueued++ + case err := <-producer.Errors(): + log.Println("Failed to produce message", err) + errors++ + case <-signals: + break ProducerLoop + } + } + + log.Printf("Enqueued: %d; errors: %d\n", enqueued, errors) +} + +// This example shows how to use the producer with separate goroutines +// reading from the Successes and Errors channels. Note that in order +// for the Successes channel to be populated, you have to set +// config.Producer.Return.Successes to true. +func ExampleAsyncProducer_goroutines() { + config := NewConfig() + config.Producer.Return.Successes = true + producer, err := NewAsyncProducer([]string{"localhost:9092"}, config) + if err != nil { + panic(err) + } + + // Trap SIGINT to trigger a graceful shutdown. 
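+ // (os/signal does not block sending to this channel, so it must be buffered to avoid missing the interrupt)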
+ signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + var ( + wg sync.WaitGroup + enqueued, successes, errors int + ) + + wg.Add(1) + go func() { + defer wg.Done() + for _ = range producer.Successes() { + successes++ + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + for err := range producer.Errors() { + log.Println(err) + errors++ + } + }() + +ProducerLoop: + for { + message := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} + select { + case producer.Input() <- message: + enqueued++ + + case <-signals: + producer.AsyncClose() // Trigger a shutdown of the producer. + break ProducerLoop + } + } + + wg.Wait() + + log.Printf("Successfully produced: %d; errors: %d\n", successes, errors) +} diff --git a/vendor/github.com/Shopify/sarama/broker_test.go b/vendor/github.com/Shopify/sarama/broker_test.go new file mode 100644 index 0000000000..53e8baf49f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/broker_test.go @@ -0,0 +1,253 @@ +package sarama + +import ( + "fmt" + "testing" +) + +func ExampleBroker() { + broker := NewBroker("localhost:9092") + err := broker.Open(nil) + if err != nil { + panic(err) + } + + request := MetadataRequest{Topics: []string{"myTopic"}} + response, err := broker.GetMetadata(&request) + if err != nil { + _ = broker.Close() + panic(err) + } + + fmt.Println("There are", len(response.Topics), "topics active in the cluster.") + + if err = broker.Close(); err != nil { + panic(err) + } +} + +type mockEncoder struct { + bytes []byte +} + +func (m mockEncoder) encode(pe packetEncoder) error { + return pe.putRawBytes(m.bytes) +} + +func TestBrokerAccessors(t *testing.T) { + broker := NewBroker("abc:123") + + if broker.ID() != -1 { + t.Error("New broker didn't have an ID of -1.") + } + + if broker.Addr() != "abc:123" { + t.Error("New broker didn't have the correct address") + } + + broker.id = 34 + if broker.ID() != 34 { + t.Error("Manually setting broker ID did not take effect.") + } +} + +func TestSimpleBrokerCommunication(t *testing.T) { + mb := NewMockBroker(t, 0) + defer mb.Close() + + broker := NewBroker(mb.Addr()) + conf := NewConfig() + conf.Version = V0_10_0_0 + err := broker.Open(conf) + if err != nil { + t.Fatal(err) + } + + for _, tt := range brokerTestTable { + mb.Returns(&mockEncoder{tt.response}) + } + for _, tt := range brokerTestTable { + tt.runner(t, broker) + } + + err = broker.Close() + if err != nil { + t.Error(err) + } +} + +// We're not testing encoding/decoding here, so most of the requests/responses will be empty for simplicity's sake +var brokerTestTable = []struct { + response []byte + runner func(*testing.T, *Broker) +}{ + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := MetadataRequest{} + response, err := broker.GetMetadata(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Metadata request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 't', 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := ConsumerMetadataRequest{} + response, err := broker.GetConsumerMetadata(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Consumer Metadata request got no response!") + } + }}, + + {[]byte{}, + func(t *testing.T, broker *Broker) { + request := ProduceRequest{} + request.RequiredAcks = NoResponse + response, err := broker.Produce(&request) + if err != nil { + t.Error(err) + } + if response != nil { + 
t.Error("Produce request with NoResponse got a response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := ProduceRequest{} + request.RequiredAcks = WaitForLocal + response, err := broker.Produce(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Produce request without NoResponse got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := FetchRequest{} + response, err := broker.Fetch(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Fetch request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := OffsetFetchRequest{} + response, err := broker.FetchOffset(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("OffsetFetch request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := OffsetCommitRequest{} + response, err := broker.CommitOffset(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("OffsetCommit request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := OffsetRequest{} + response, err := broker.GetAvailableOffsets(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Offset request got no response!") + } + }}, + + {[]byte{0x00, 0x17, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := JoinGroupRequest{} + response, err := broker.JoinGroup(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("JoinGroup request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := SyncGroupRequest{} + response, err := broker.SyncGroup(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("SyncGroup request got no response!") + } + }}, + + {[]byte{0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := LeaveGroupRequest{} + response, err := broker.LeaveGroup(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("LeaveGroup request got no response!") + } + }}, + + {[]byte{0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := HeartbeatRequest{} + response, err := broker.Heartbeat(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("Heartbeat request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := ListGroupsRequest{} + response, err := broker.ListGroups(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("ListGroups request got no response!") + } + }}, + + {[]byte{0x00, 0x00, 0x00, 0x00}, + func(t *testing.T, broker *Broker) { + request := DescribeGroupsRequest{} + response, err := broker.DescribeGroups(&request) + if err != nil { + t.Error(err) + } + if response == nil { + t.Error("DescribeGroups request got no response!") + } + }}, +} diff --git a/vendor/github.com/Shopify/sarama/client_test.go b/vendor/github.com/Shopify/sarama/client_test.go new file mode 100644 index 0000000000..b0559466f2 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/client_test.go @@ -0,0 +1,608 @@ +package sarama + +import ( + "io" + "sync" + "testing" + "time" +) + +func safeClose(t 
testing.TB, c io.Closer) { + err := c.Close() + if err != nil { + t.Error(err) + } +} + +func TestSimpleClient(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + + seedBroker.Returns(new(MetadataResponse)) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestCachedPartitions(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + + replicas := []int32{3, 1, 5} + isr := []int32{5, 1} + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker("localhost:12345", 2) + metadataResponse.AddTopicPartition("my_topic", 0, 2, replicas, isr, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, 2, replicas, isr, ErrLeaderNotAvailable) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + c, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + client := c.(*client) + + // Verify they aren't cached the same + allP := client.cachedPartitionsResults["my_topic"][allPartitions] + writeP := client.cachedPartitionsResults["my_topic"][writablePartitions] + if len(allP) == len(writeP) { + t.Fatal("Invalid lengths!") + } + + tmp := client.cachedPartitionsResults["my_topic"] + // Verify we actually use the cache at all! + tmp[allPartitions] = []int32{1, 2, 3, 4} + client.cachedPartitionsResults["my_topic"] = tmp + if 4 != len(client.cachedPartitions("my_topic", allPartitions)) { + t.Fatal("Not using the cache!") + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestClientDoesntCachePartitionsForTopicsWithErrors(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + + replicas := []int32{seedBroker.BrokerID()} + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(seedBroker.Addr(), seedBroker.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 1, replicas[0], replicas, replicas, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 2, replicas[0], replicas, replicas, ErrNoError) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + metadataResponse = new(MetadataResponse) + metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataResponse) + + partitions, err := client.Partitions("unknown") + + if err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + if partitions != nil { + t.Errorf("Should return nil as partition list, found %v", partitions) + } + + // Should still use the cache of a known topic + partitions, err = client.Partitions("my_topic") + if err != nil { + t.Errorf("Expected no error, found %v", err) + } + + metadataResponse = new(MetadataResponse) + metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataResponse) + + // Should not use cache for unknown topic + partitions, err = client.Partitions("unknown") + if err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + if partitions != nil { + t.Errorf("Should return nil as partition list, found %v", partitions) + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestClientSeedBrokers(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker("localhost:12345", 2) + seedBroker.Returns(metadataResponse) + 
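+ // The advertised broker (localhost:12345) is never contacted; the client only needs the seed broker to answer the initial metadata request.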
+ client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + seedBroker.Close() + safeClose(t, client) +} + +func TestClientMetadata(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 5) + + replicas := []int32{3, 1, 5} + isr := []int32{5, 1} + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), replicas, isr, ErrNoError) + metadataResponse.AddTopicPartition("my_topic", 1, leader.BrokerID(), replicas, isr, ErrLeaderNotAvailable) + seedBroker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + topics, err := client.Topics() + if err != nil { + t.Error(err) + } else if len(topics) != 1 || topics[0] != "my_topic" { + t.Error("Client returned incorrect topics:", topics) + } + + parts, err := client.Partitions("my_topic") + if err != nil { + t.Error(err) + } else if len(parts) != 2 || parts[0] != 0 || parts[1] != 1 { + t.Error("Client returned incorrect partitions for my_topic:", parts) + } + + parts, err = client.WritablePartitions("my_topic") + if err != nil { + t.Error(err) + } else if len(parts) != 1 || parts[0] != 0 { + t.Error("Client returned incorrect writable partitions for my_topic:", parts) + } + + tst, err := client.Leader("my_topic", 0) + if err != nil { + t.Error(err) + } else if tst.ID() != 5 { + t.Error("Leader for my_topic had incorrect ID.") + } + + replicas, err = client.Replicas("my_topic", 0) + if err != nil { + t.Error(err) + } else if replicas[0] != 1 { + t.Error("Incorrect (or unsorted) replica") + } else if replicas[1] != 3 { + t.Error("Incorrect (or unsorted) replica") + } else if replicas[2] != 5 { + t.Error("Incorrect (or unsorted) replica") + } + + leader.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientGetOffset(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + leaderAddr := leader.Addr() + + metadata := new(MetadataResponse) + metadata.AddTopicPartition("foo", 0, leader.BrokerID(), nil, nil, ErrNoError) + metadata.AddBroker(leaderAddr, leader.BrokerID()) + seedBroker.Returns(metadata) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + offsetResponse := new(OffsetResponse) + offsetResponse.AddTopicPartition("foo", 0, 123) + leader.Returns(offsetResponse) + + offset, err := client.GetOffset("foo", 0, OffsetNewest) + if err != nil { + t.Error(err) + } + if offset != 123 { + t.Error("Unexpected offset, got ", offset) + } + + leader.Close() + seedBroker.Returns(metadata) + + leader = NewMockBrokerAddr(t, 2, leaderAddr) + offsetResponse = new(OffsetResponse) + offsetResponse.AddTopicPartition("foo", 0, 456) + leader.Returns(offsetResponse) + + offset, err = client.GetOffset("foo", 0, OffsetNewest) + if err != nil { + t.Error(err) + } + if offset != 456 { + t.Error("Unexpected offset, got ", offset) + } + + seedBroker.Close() + leader.Close() + safeClose(t, client) +} + +func TestClientReceivingUnknownTopic(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + + metadataResponse1 := new(MetadataResponse) + seedBroker.Returns(metadataResponse1) + + config := NewConfig() + config.Metadata.Retry.Max = 1 + config.Metadata.Retry.Backoff = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + 
} + + metadataUnknownTopic := new(MetadataResponse) + metadataUnknownTopic.AddTopic("new_topic", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataUnknownTopic) + seedBroker.Returns(metadataUnknownTopic) + + if err := client.RefreshMetadata("new_topic"); err != ErrUnknownTopicOrPartition { + t.Error("ErrUnknownTopicOrPartition expected, got", err) + } + + // If we are asking for the leader of a partition of the non-existing topic, + // we will request metadata again. + seedBroker.Returns(metadataUnknownTopic) + seedBroker.Returns(metadataUnknownTopic) + + if _, err = client.Leader("new_topic", 1); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + safeClose(t, client) + seedBroker.Close() +} + +func TestClientReceivingPartialMetadata(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 5) + + metadataResponse1 := new(MetadataResponse) + metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) + seedBroker.Returns(metadataResponse1) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + replicas := []int32{leader.BrokerID(), seedBroker.BrokerID()} + + metadataPartial := new(MetadataResponse) + metadataPartial.AddTopic("new_topic", ErrLeaderNotAvailable) + metadataPartial.AddTopicPartition("new_topic", 0, leader.BrokerID(), replicas, replicas, ErrNoError) + metadataPartial.AddTopicPartition("new_topic", 1, -1, replicas, []int32{}, ErrLeaderNotAvailable) + seedBroker.Returns(metadataPartial) + + if err := client.RefreshMetadata("new_topic"); err != nil { + t.Error("ErrLeaderNotAvailable should not make RefreshMetadata respond with an error") + } + + // Even though the metadata was incomplete, we should be able to get the leader of a partition + // for which we did get a useful response, without doing additional requests. + + partition0Leader, err := client.Leader("new_topic", 0) + if err != nil { + t.Error(err) + } else if partition0Leader.Addr() != leader.Addr() { + t.Error("Unexpected leader returned", partition0Leader.Addr()) + } + + // If we are asking for the leader of a partition that didn't have a leader before, + // we will do another metadata request. + + seedBroker.Returns(metadataPartial) + + // Still no leader for the partition, so asking for it should return an error.
+ _, err = client.Leader("new_topic", 1) + if err != ErrLeaderNotAvailable { + t.Error("Expected ErrLeaderNotAvailable, got", err) + } + + safeClose(t, client) + seedBroker.Close() + leader.Close() +} + +func TestClientRefreshBehaviour(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 5) + + metadataResponse1 := new(MetadataResponse) + metadataResponse1.AddBroker(leader.Addr(), leader.BrokerID()) + seedBroker.Returns(metadataResponse1) + + metadataResponse2 := new(MetadataResponse) + metadataResponse2.AddTopicPartition("my_topic", 0xb, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse2) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + parts, err := client.Partitions("my_topic") + if err != nil { + t.Error(err) + } else if len(parts) != 1 || parts[0] != 0xb { + t.Error("Client returned incorrect partitions for my_topic:", parts) + } + + tst, err := client.Leader("my_topic", 0xb) + if err != nil { + t.Error(err) + } else if tst.ID() != 5 { + t.Error("Leader for my_topic had incorrect ID.") + } + + leader.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientResurrectDeadSeeds(t *testing.T) { + initialSeed := NewMockBroker(t, 0) + emptyMetadata := new(MetadataResponse) + initialSeed.Returns(emptyMetadata) + + conf := NewConfig() + conf.Metadata.Retry.Backoff = 0 + conf.Metadata.RefreshFrequency = 0 + c, err := NewClient([]string{initialSeed.Addr()}, conf) + if err != nil { + t.Fatal(err) + } + initialSeed.Close() + + client := c.(*client) + + seed1 := NewMockBroker(t, 1) + seed2 := NewMockBroker(t, 2) + seed3 := NewMockBroker(t, 3) + addr1 := seed1.Addr() + addr2 := seed2.Addr() + addr3 := seed3.Addr() + + // Overwrite the seed brokers with a fixed ordering to make this test deterministic. 
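+ // (the client may reorder the seed list it was given, so we close the auto-created seed broker and install our own)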
+ safeClose(t, client.seedBrokers[0]) + client.seedBrokers = []*Broker{NewBroker(addr1), NewBroker(addr2), NewBroker(addr3)} + client.deadSeeds = []*Broker{} + + wg := sync.WaitGroup{} + wg.Add(1) + go func() { + if err := client.RefreshMetadata(); err != nil { + t.Error(err) + } + wg.Done() + }() + seed1.Close() + seed2.Close() + + seed1 = NewMockBrokerAddr(t, 1, addr1) + seed2 = NewMockBrokerAddr(t, 2, addr2) + + seed3.Close() + + seed1.Close() + seed2.Returns(emptyMetadata) + + wg.Wait() + + if len(client.seedBrokers) != 2 { + t.Error("incorrect number of live seeds") + } + if len(client.deadSeeds) != 1 { + t.Error("incorrect number of dead seeds") + } + + safeClose(t, c) +} + +func TestClientCoordinatorWithConsumerOffsetsTopic(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + staleCoordinator := NewMockBroker(t, 2) + freshCoordinator := NewMockBroker(t, 3) + + replicas := []int32{staleCoordinator.BrokerID(), freshCoordinator.BrokerID()} + metadataResponse1 := new(MetadataResponse) + metadataResponse1.AddBroker(staleCoordinator.Addr(), staleCoordinator.BrokerID()) + metadataResponse1.AddBroker(freshCoordinator.Addr(), freshCoordinator.BrokerID()) + metadataResponse1.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) + seedBroker.Returns(metadataResponse1) + + client, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + coordinatorResponse1 := new(ConsumerMetadataResponse) + coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable + seedBroker.Returns(coordinatorResponse1) + + coordinatorResponse2 := new(ConsumerMetadataResponse) + coordinatorResponse2.CoordinatorID = staleCoordinator.BrokerID() + coordinatorResponse2.CoordinatorHost = "127.0.0.1" + coordinatorResponse2.CoordinatorPort = staleCoordinator.Port() + + seedBroker.Returns(coordinatorResponse2) + + broker, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if staleCoordinator.Addr() != broker.Addr() { + t.Errorf("Expected coordinator to have address %s, found %s", staleCoordinator.Addr(), broker.Addr()) + } + + if staleCoordinator.BrokerID() != broker.ID() { + t.Errorf("Expected coordinator to have ID %d, found %d", staleCoordinator.BrokerID(), broker.ID()) + } + + // Grab the cached value + broker2, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if broker2.Addr() != broker.Addr() { + t.Errorf("Expected the coordinator to be the same, but found %s vs. 
%s", broker2.Addr(), broker.Addr()) + } + + coordinatorResponse3 := new(ConsumerMetadataResponse) + coordinatorResponse3.CoordinatorID = freshCoordinator.BrokerID() + coordinatorResponse3.CoordinatorHost = "127.0.0.1" + coordinatorResponse3.CoordinatorPort = freshCoordinator.Port() + + seedBroker.Returns(coordinatorResponse3) + + // Refresh the locally cahced value because it's stale + if err := client.RefreshCoordinator("my_group"); err != nil { + t.Error(err) + } + + // Grab the fresh value + broker3, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if broker3.Addr() != freshCoordinator.Addr() { + t.Errorf("Expected the freshCoordinator to be returned, but found %s.", broker3.Addr()) + } + + freshCoordinator.Close() + staleCoordinator.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientCoordinatorWithoutConsumerOffsetsTopic(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + coordinator := NewMockBroker(t, 2) + + metadataResponse1 := new(MetadataResponse) + seedBroker.Returns(metadataResponse1) + + config := NewConfig() + config.Metadata.Retry.Max = 1 + config.Metadata.Retry.Backoff = 0 + client, err := NewClient([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + coordinatorResponse1 := new(ConsumerMetadataResponse) + coordinatorResponse1.Err = ErrConsumerCoordinatorNotAvailable + seedBroker.Returns(coordinatorResponse1) + + metadataResponse2 := new(MetadataResponse) + metadataResponse2.AddTopic("__consumer_offsets", ErrUnknownTopicOrPartition) + seedBroker.Returns(metadataResponse2) + + replicas := []int32{coordinator.BrokerID()} + metadataResponse3 := new(MetadataResponse) + metadataResponse3.AddTopicPartition("__consumer_offsets", 0, replicas[0], replicas, replicas, ErrNoError) + seedBroker.Returns(metadataResponse3) + + coordinatorResponse2 := new(ConsumerMetadataResponse) + coordinatorResponse2.CoordinatorID = coordinator.BrokerID() + coordinatorResponse2.CoordinatorHost = "127.0.0.1" + coordinatorResponse2.CoordinatorPort = coordinator.Port() + + seedBroker.Returns(coordinatorResponse2) + + broker, err := client.Coordinator("my_group") + if err != nil { + t.Error(err) + } + + if coordinator.Addr() != broker.Addr() { + t.Errorf("Expected coordinator to have address %s, found %s", coordinator.Addr(), broker.Addr()) + } + + if coordinator.BrokerID() != broker.ID() { + t.Errorf("Expected coordinator to have ID %d, found %d", coordinator.BrokerID(), broker.ID()) + } + + coordinator.Close() + seedBroker.Close() + safeClose(t, client) +} + +func TestClientAutorefreshShutdownRace(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + + metadataResponse := new(MetadataResponse) + seedBroker.Returns(metadataResponse) + + conf := NewConfig() + conf.Metadata.RefreshFrequency = 100 * time.Millisecond + client, err := NewClient([]string{seedBroker.Addr()}, conf) + if err != nil { + t.Fatal(err) + } + + // Wait for the background refresh to kick in + time.Sleep(110 * time.Millisecond) + + done := make(chan none) + go func() { + // Close the client + if err := client.Close(); err != nil { + t.Fatal(err) + } + close(done) + }() + + // Wait for the Close to kick in + time.Sleep(10 * time.Millisecond) + + // Then return some metadata to the still-running background thread + leader := NewMockBroker(t, 2) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("foo", 0, leader.BrokerID(), []int32{2}, []int32{2}, ErrNoError) + seedBroker.Returns(metadataResponse) + + <-done + + 
seedBroker.Close() + + // give the update time to happen so we get a panic if it's still running (which it shouldn't) + time.Sleep(10 * time.Millisecond) +} diff --git a/vendor/github.com/Shopify/sarama/config_test.go b/vendor/github.com/Shopify/sarama/config_test.go new file mode 100644 index 0000000000..08bcaa4213 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/config_test.go @@ -0,0 +1,26 @@ +package sarama + +import "testing" + +func TestDefaultConfigValidates(t *testing.T) { + config := NewConfig() + if err := config.Validate(); err != nil { + t.Error(err) + } +} + +func TestInvalidClientIDConfigValidates(t *testing.T) { + config := NewConfig() + config.ClientID = "foo:bar" + if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" { + t.Error("Expected invalid ClientID, got ", err) + } +} + +func TestEmptyClientIDConfigValidates(t *testing.T) { + config := NewConfig() + config.ClientID = "" + if err := config.Validate(); string(err.(ConfigurationError)) != "ClientID is invalid" { + t.Error("Expected invalid ClientID, got ", err) + } +} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members_test.go b/vendor/github.com/Shopify/sarama/consumer_group_members_test.go new file mode 100644 index 0000000000..1c1d154abb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group_members_test.go @@ -0,0 +1,73 @@ +package sarama + +import ( + "bytes" + "reflect" + "testing" +) + +var ( + groupMemberMetadata = []byte{ + 0, 1, // Version + 0, 0, 0, 2, // Topic array length + 0, 3, 'o', 'n', 'e', // Topic one + 0, 3, 't', 'w', 'o', // Topic two + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + } + groupMemberAssignment = []byte{ + 0, 1, // Version + 0, 0, 0, 1, // Topic array length + 0, 3, 'o', 'n', 'e', // Topic one + 0, 0, 0, 3, // Topic one, partition array length + 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 4, // 0, 2, 4 + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Userdata + } +) + +func TestConsumerGroupMemberMetadata(t *testing.T) { + meta := &ConsumerGroupMemberMetadata{ + Version: 1, + Topics: []string{"one", "two"}, + UserData: []byte{0x01, 0x02, 0x03}, + } + + buf, err := encode(meta) + if err != nil { + t.Error("Failed to encode data", err) + } else if !bytes.Equal(groupMemberMetadata, buf) { + t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberMetadata, buf) + } + + meta2 := new(ConsumerGroupMemberMetadata) + err = decode(buf, meta2) + if err != nil { + t.Error("Failed to decode data", err) + } else if !reflect.DeepEqual(meta, meta2) { + t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", meta, meta2) + } +} + +func TestConsumerGroupMemberAssignment(t *testing.T) { + amt := &ConsumerGroupMemberAssignment{ + Version: 1, + Topics: map[string][]int32{ + "one": []int32{0, 2, 4}, + }, + UserData: []byte{0x01, 0x02, 0x03}, + } + + buf, err := encode(amt) + if err != nil { + t.Error("Failed to encode data", err) + } else if !bytes.Equal(groupMemberAssignment, buf) { + t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", groupMemberAssignment, buf) + } + + amt2 := new(ConsumerGroupMemberAssignment) + err = decode(buf, amt2) + if err != nil { + t.Error("Failed to decode data", err) + } else if !reflect.DeepEqual(amt, amt2) { + t.Errorf("Encoded data does not match expectation\nexpected: %v\nactual: %v", amt, amt2) + } +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go new 
file mode 100644 index 0000000000..4509631a04 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request_test.go @@ -0,0 +1,19 @@ +package sarama + +import "testing" + +var ( + consumerMetadataRequestEmpty = []byte{ + 0x00, 0x00} + + consumerMetadataRequestString = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r'} +) + +func TestConsumerMetadataRequest(t *testing.T) { + request := new(ConsumerMetadataRequest) + testRequest(t, "empty string", request, consumerMetadataRequestEmpty) + + request.ConsumerGroup = "foobar" + testRequest(t, "with string", request, consumerMetadataRequestString) +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go new file mode 100644 index 0000000000..b748784d7e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response_test.go @@ -0,0 +1,35 @@ +package sarama + +import "testing" + +var ( + consumerMetadataResponseError = []byte{ + 0x00, 0x0E, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + consumerMetadataResponseSuccess = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0xAB, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0xCC, 0xDD} +) + +func TestConsumerMetadataResponseError(t *testing.T) { + response := ConsumerMetadataResponse{Err: ErrOffsetsLoadInProgress} + testResponse(t, "error", &response, consumerMetadataResponseError) +} + +func TestConsumerMetadataResponseSuccess(t *testing.T) { + broker := NewBroker("foo:52445") + broker.id = 0xAB + response := ConsumerMetadataResponse{ + Coordinator: broker, + CoordinatorID: 0xAB, + CoordinatorHost: "foo", + CoordinatorPort: 0xCCDD, + Err: ErrNoError, + } + testResponse(t, "success", &response, consumerMetadataResponseSuccess) +} diff --git a/vendor/github.com/Shopify/sarama/consumer_test.go b/vendor/github.com/Shopify/sarama/consumer_test.go new file mode 100644 index 0000000000..387ede314b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_test.go @@ -0,0 +1,854 @@ +package sarama + +import ( + "log" + "os" + "os/signal" + "sync" + "testing" + "time" +) + +var testMsg = StringEncoder("Foo") + +// If a particular offset is provided then messages are consumed starting from +// that offset. +func TestConsumerOffsetManual(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + + mockFetchResponse := NewMockFetchResponse(t, 1) + for i := 0; i < 10; i++ { + mockFetchResponse.SetMessage("my_topic", 0, int64(i+1234), testMsg) + } + + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0). + SetOffset("my_topic", 0, OffsetNewest, 2345), + "FetchRequest": mockFetchResponse, + }) + + // When + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + consumer, err := master.ConsumePartition("my_topic", 0, 1234) + if err != nil { + t.Fatal(err) + } + + // Then: messages starting from offset 1234 are consumed. 
+ for i := 0; i < 10; i++ { + select { + case message := <-consumer.Messages(): + assertMessageOffset(t, message, int64(i+1234)) + case err := <-consumer.Errors(): + t.Error(err) + } + } + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// If `OffsetNewest` is passed as the initial offset then the first consumed +// message indeed corresponds to the offset that the broker claims to be the +// newest in its metadata response. +func TestConsumerOffsetNewest(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 10). + SetOffset("my_topic", 0, OffsetOldest, 7), + "FetchRequest": NewMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 9, testMsg). + SetMessage("my_topic", 0, 10, testMsg). + SetMessage("my_topic", 0, 11, testMsg). + SetHighWaterMark("my_topic", 0, 14), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, OffsetNewest) + if err != nil { + t.Fatal(err) + } + + // Then + assertMessageOffset(t, <-consumer.Messages(), 10) + if hwmo := consumer.HighWaterMarkOffset(); hwmo != 14 { + t.Errorf("Expected high water mark offset 14, found %d", hwmo) + } + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// It is possible to close a partition consumer and create the same one anew. +func TestConsumerRecreate(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": NewMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 10, testMsg), + }) + + c, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + pc, err := c.ConsumePartition("my_topic", 0, 10) + if err != nil { + t.Fatal(err) + } + assertMessageOffset(t, <-pc.Messages(), 10) + + // When + safeClose(t, pc) + pc, err = c.ConsumePartition("my_topic", 0, 10) + if err != nil { + t.Fatal(err) + } + + // Then + assertMessageOffset(t, <-pc.Messages(), 10) + + safeClose(t, pc) + safeClose(t, c) + broker0.Close() +} + +// An attempt to consume the same partition twice should fail. +func TestConsumerDuplicate(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0).
+ SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": NewMockFetchResponse(t, 1), + }) + + config := NewConfig() + config.ChannelBufferSize = 0 + c, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + pc1, err := c.ConsumePartition("my_topic", 0, 0) + if err != nil { + t.Fatal(err) + } + + // When + pc2, err := c.ConsumePartition("my_topic", 0, 0) + + // Then + if pc2 != nil || err != ConfigurationError("That topic/partition is already being consumed") { + t.Fatal("A partition cannot be consumed twice at the same time") + } + + safeClose(t, pc1) + safeClose(t, c) + broker0.Close() +} + +// If consumer fails to refresh metadata it keeps retrying with frequency +// specified by `Config.Consumer.Retry.Backoff`. +func TestConsumerLeaderRefreshError(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 100) + + // Stage 1: my_topic/0 served by broker0 + Logger.Printf(" STAGE 1") + + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 123). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": NewMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 123, testMsg), + }) + + config := NewConfig() + config.Net.ReadTimeout = 100 * time.Millisecond + config.Consumer.Retry.Backoff = 200 * time.Millisecond + config.Consumer.Return.Errors = true + config.Metadata.Retry.Max = 0 + c, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) + if err != nil { + t.Fatal(err) + } + + assertMessageOffset(t, <-pc.Messages(), 123) + + // Stage 2: broker0 says that it is no longer the leader for my_topic/0, + // but the requests to retrieve metadata fail with network timeout. + Logger.Printf(" STAGE 2") + + fetchResponse2 := &FetchResponse{} + fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) + + broker0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": NewMockWrapper(fetchResponse2), + }) + + if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { + t.Errorf("Unexpected error: %v", consErr.Err) + } + + // Stage 3: finally the metadata returned by broker0 tells that broker1 is + // a new leader for my_topic/0. Consumption resumes. + + Logger.Printf(" STAGE 3") + + broker1 := NewMockBroker(t, 101) + + broker1.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": NewMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 124, testMsg), + }) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetBroker(broker1.Addr(), broker1.BrokerID()). + SetLeader("my_topic", 0, broker1.BrokerID()), + }) + + assertMessageOffset(t, <-pc.Messages(), 124) + + safeClose(t, pc) + safeClose(t, c) + broker1.Close() + broker0.Close() +} + +func TestConsumerInvalidTopic(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 100) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). 
+ SetBroker(broker0.Addr(), broker0.BrokerID()), + }) + + c, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) + + // Then + if pc != nil || err != ErrUnknownTopicOrPartition { + t.Errorf("Should fail with ErrUnknownTopicOrPartition, err=%v", err) + } + + safeClose(t, c) + broker0.Close() +} + +// Nothing bad happens if a partition consumer that has no leader assigned at +// the moment is closed. +func TestConsumerClosePartitionWithoutLeader(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 100) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 123). + SetOffset("my_topic", 0, OffsetNewest, 1000), + "FetchRequest": NewMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 123, testMsg), + }) + + config := NewConfig() + config.Net.ReadTimeout = 100 * time.Millisecond + config.Consumer.Retry.Backoff = 100 * time.Millisecond + config.Consumer.Return.Errors = true + config.Metadata.Retry.Max = 0 + c, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + pc, err := c.ConsumePartition("my_topic", 0, OffsetOldest) + if err != nil { + t.Fatal(err) + } + + assertMessageOffset(t, <-pc.Messages(), 123) + + // broker0 says that it is no longer the leader for my_topic/0, but the + // requests to retrieve metadata fail with network timeout. + fetchResponse2 := &FetchResponse{} + fetchResponse2.AddError("my_topic", 0, ErrNotLeaderForPartition) + + broker0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": NewMockWrapper(fetchResponse2), + }) + + // When + if consErr := <-pc.Errors(); consErr.Err != ErrOutOfBrokers { + t.Errorf("Unexpected error: %v", consErr.Err) + } + + // Then: the partition consumer can be closed without any problem. + safeClose(t, pc) + safeClose(t, c) + broker0.Close() +} + +// If the initial offset passed on partition consumer creation is out of the +// actual offset range for the partition, then the partition consumer stops +// immediately, closing its output channels. +func TestConsumerShutsDownOutOfRange(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + fetchResponse := new(FetchResponse) + fetchResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 7), + "FetchRequest": NewMockWrapper(fetchResponse), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, 101) + if err != nil { + t.Fatal(err) + } + + // Then: consumer should shut down closing its messages and errors channels. + if _, ok := <-consumer.Messages(); ok { + t.Error("Expected the consumer to shut down") + } + safeClose(t, consumer) + + safeClose(t, master) + broker0.Close() +} + +// If a fetch response contains messages with offsets that are smaller than +// requested, then such messages are ignored.
+func TestConsumerExtraOffsets(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + fetchResponse1 := &FetchResponse{} + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 1) + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 2) + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 3) + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 4) + fetchResponse2 := &FetchResponse{} + fetchResponse2.AddError("my_topic", 0, ErrNoError) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 0), + "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, 3) + if err != nil { + t.Fatal(err) + } + + // Then: messages with offsets 1 and 2 are not returned even though they + // are present in the response. + assertMessageOffset(t, <-consumer.Messages(), 3) + assertMessageOffset(t, <-consumer.Messages(), 4) + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// It is fine if offsets of fetched messages are not sequential (as long as +// they are strictly increasing). +func TestConsumerNonSequentialOffsets(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + fetchResponse1 := &FetchResponse{} + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 5) + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 7) + fetchResponse1.AddMessage("my_topic", 0, nil, testMsg, 11) + fetchResponse2 := &FetchResponse{} + fetchResponse2.AddError("my_topic", 0, ErrNoError) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). + SetOffset("my_topic", 0, OffsetOldest, 0), + "FetchRequest": NewMockSequence(fetchResponse1, fetchResponse2), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When + consumer, err := master.ConsumePartition("my_topic", 0, 3) + if err != nil { + t.Fatal(err) + } + + // Then: messages with offsets 5, 7, and 11 are returned in order, even + // though their offsets are not sequential. + assertMessageOffset(t, <-consumer.Messages(), 5) + assertMessageOffset(t, <-consumer.Messages(), 7) + assertMessageOffset(t, <-consumer.Messages(), 11) + + safeClose(t, consumer) + safeClose(t, master) + broker0.Close() +} + +// If leadership for a partition changes then the consumer resolves the new +// leader and switches to it. +func TestConsumerRebalancingMultiplePartitions(t *testing.T) { + // initial setup + seedBroker := NewMockBroker(t, 10) + leader0 := NewMockBroker(t, 0) + leader1 := NewMockBroker(t, 1) + + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(leader0.Addr(), leader0.BrokerID()). + SetBroker(leader1.Addr(), leader1.BrokerID()). + SetLeader("my_topic", 0, leader0.BrokerID()). + SetLeader("my_topic", 1, leader1.BrokerID()), + }) + + mockOffsetResponse1 := NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 0).
+ SetOffset("my_topic", 0, OffsetNewest, 1000). + SetOffset("my_topic", 1, OffsetOldest, 0). + SetOffset("my_topic", 1, OffsetNewest, 1000) + leader0.SetHandlerByMap(map[string]MockResponse{ + "OffsetRequest": mockOffsetResponse1, + "FetchRequest": NewMockFetchResponse(t, 1), + }) + leader1.SetHandlerByMap(map[string]MockResponse{ + "OffsetRequest": mockOffsetResponse1, + "FetchRequest": NewMockFetchResponse(t, 1), + }) + + // launch test goroutines + config := NewConfig() + config.Consumer.Retry.Backoff = 50 + master, err := NewConsumer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + // we expect to end up (eventually) consuming exactly ten messages on each partition + var wg sync.WaitGroup + for i := int32(0); i < 2; i++ { + consumer, err := master.ConsumePartition("my_topic", i, 0) + if err != nil { + t.Error(err) + } + + go func(c PartitionConsumer) { + for err := range c.Errors() { + t.Error(err) + } + }(consumer) + + wg.Add(1) + go func(partition int32, c PartitionConsumer) { + for i := 0; i < 10; i++ { + message := <-consumer.Messages() + if message.Offset != int64(i) { + t.Error("Incorrect message offset!", i, partition, message.Offset) + } + if message.Partition != partition { + t.Error("Incorrect message partition!") + } + } + safeClose(t, consumer) + wg.Done() + }(i, consumer) + } + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 1") + // Stage 1: + // * my_topic/0 -> leader0 serves 4 messages + // * my_topic/1 -> leader1 serves 0 messages + + mockFetchResponse := NewMockFetchResponse(t, 1) + for i := 0; i < 4; i++ { + mockFetchResponse.SetMessage("my_topic", 0, int64(i), testMsg) + } + leader0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse, + }) + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 2") + // Stage 2: + // * leader0 says that it is no longer serving my_topic/0 + // * seedBroker tells that leader1 is serving my_topic/0 now + + // seed broker tells that the new partition 0 leader is leader1 + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetLeader("my_topic", 0, leader1.BrokerID()). + SetLeader("my_topic", 1, leader1.BrokerID()), + }) + + // leader0 says no longer leader of partition 0 + fetchResponse := new(FetchResponse) + fetchResponse.AddError("my_topic", 0, ErrNotLeaderForPartition) + leader0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": NewMockWrapper(fetchResponse), + }) + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 3") + // Stage 3: + // * my_topic/0 -> leader1 serves 3 messages + // * my_topic/1 -> leader1 server 8 messages + + // leader1 provides 3 message on partition 0, and 8 messages on partition 1 + mockFetchResponse2 := NewMockFetchResponse(t, 2) + for i := 4; i < 7; i++ { + mockFetchResponse2.SetMessage("my_topic", 0, int64(i), testMsg) + } + for i := 0; i < 8; i++ { + mockFetchResponse2.SetMessage("my_topic", 1, int64(i), testMsg) + } + leader1.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse2, + }) + + time.Sleep(50 * time.Millisecond) + Logger.Printf(" STAGE 4") + // Stage 4: + // * my_topic/0 -> leader1 serves 3 messages + // * my_topic/1 -> leader1 tells that it is no longer the leader + // * seedBroker tells that leader0 is a new leader for my_topic/1 + + // metadata assigns 0 to leader1 and 1 to leader0 + seedBroker.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetLeader("my_topic", 0, leader1.BrokerID()). 
+ SetLeader("my_topic", 1, leader0.BrokerID()), + }) + + // leader1 provides three more messages on partition0, says no longer leader of partition1 + mockFetchResponse3 := NewMockFetchResponse(t, 3). + SetMessage("my_topic", 0, int64(7), testMsg). + SetMessage("my_topic", 0, int64(8), testMsg). + SetMessage("my_topic", 0, int64(9), testMsg) + fetchResponse4 := new(FetchResponse) + fetchResponse4.AddError("my_topic", 1, ErrNotLeaderForPartition) + leader1.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": NewMockSequence(mockFetchResponse3, fetchResponse4), + }) + + // leader0 provides two messages on partition 1 + mockFetchResponse4 := NewMockFetchResponse(t, 2) + for i := 8; i < 10; i++ { + mockFetchResponse4.SetMessage("my_topic", 1, int64(i), testMsg) + } + leader0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse4, + }) + + wg.Wait() + safeClose(t, master) + leader1.Close() + leader0.Close() + seedBroker.Close() +} + +// When two partitions have the same broker as the leader, if one partition +// consumer channel buffer is full then that does not affect the ability to +// read messages by the other consumer. +func TestConsumerInterleavedClose(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 0) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()). + SetLeader("my_topic", 1, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 1000). + SetOffset("my_topic", 0, OffsetNewest, 1100). + SetOffset("my_topic", 1, OffsetOldest, 2000). + SetOffset("my_topic", 1, OffsetNewest, 2100), + "FetchRequest": NewMockFetchResponse(t, 1). + SetMessage("my_topic", 0, 1000, testMsg). + SetMessage("my_topic", 0, 1001, testMsg). + SetMessage("my_topic", 0, 1002, testMsg). + SetMessage("my_topic", 1, 2000, testMsg), + }) + + config := NewConfig() + config.ChannelBufferSize = 0 + master, err := NewConsumer([]string{broker0.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + c0, err := master.ConsumePartition("my_topic", 0, 1000) + if err != nil { + t.Fatal(err) + } + + c1, err := master.ConsumePartition("my_topic", 1, 2000) + if err != nil { + t.Fatal(err) + } + + // When/Then: we can read from partition 0 even if nobody reads from partition 1 + assertMessageOffset(t, <-c0.Messages(), 1000) + assertMessageOffset(t, <-c0.Messages(), 1001) + assertMessageOffset(t, <-c0.Messages(), 1002) + + safeClose(t, c1) + safeClose(t, c0) + safeClose(t, master) + broker0.Close() +} + +func TestConsumerBounceWithReferenceOpen(t *testing.T) { + broker0 := NewMockBroker(t, 0) + broker0Addr := broker0.Addr() + broker1 := NewMockBroker(t, 1) + + mockMetadataResponse := NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetBroker(broker1.Addr(), broker1.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()). + SetLeader("my_topic", 1, broker1.BrokerID()) + + mockOffsetResponse := NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetOldest, 1000). + SetOffset("my_topic", 0, OffsetNewest, 1100). + SetOffset("my_topic", 1, OffsetOldest, 2000). 
+ SetOffset("my_topic", 1, OffsetNewest, 2100) + + mockFetchResponse := NewMockFetchResponse(t, 1) + for i := 0; i < 10; i++ { + mockFetchResponse.SetMessage("my_topic", 0, int64(1000+i), testMsg) + mockFetchResponse.SetMessage("my_topic", 1, int64(2000+i), testMsg) + } + + broker0.SetHandlerByMap(map[string]MockResponse{ + "OffsetRequest": mockOffsetResponse, + "FetchRequest": mockFetchResponse, + }) + broker1.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": mockMetadataResponse, + "OffsetRequest": mockOffsetResponse, + "FetchRequest": mockFetchResponse, + }) + + config := NewConfig() + config.Consumer.Return.Errors = true + config.Consumer.Retry.Backoff = 100 * time.Millisecond + config.ChannelBufferSize = 1 + master, err := NewConsumer([]string{broker1.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + c0, err := master.ConsumePartition("my_topic", 0, 1000) + if err != nil { + t.Fatal(err) + } + + c1, err := master.ConsumePartition("my_topic", 1, 2000) + if err != nil { + t.Fatal(err) + } + + // read messages from both partition to make sure that both brokers operate + // normally. + assertMessageOffset(t, <-c0.Messages(), 1000) + assertMessageOffset(t, <-c1.Messages(), 2000) + + // Simulate broker shutdown. Note that metadata response does not change, + // that is the leadership does not move to another broker. So partition + // consumer will keep retrying to restore the connection with the broker. + broker0.Close() + + // Make sure that while the partition/0 leader is down, consumer/partition/1 + // is capable of pulling messages from broker1. + for i := 1; i < 7; i++ { + offset := (<-c1.Messages()).Offset + if offset != int64(2000+i) { + t.Errorf("Expected offset %d from consumer/partition/1", int64(2000+i)) + } + } + + // Bring broker0 back to service. + broker0 = NewMockBrokerAddr(t, 0, broker0Addr) + broker0.SetHandlerByMap(map[string]MockResponse{ + "FetchRequest": mockFetchResponse, + }) + + // Read the rest of messages from both partitions. + for i := 7; i < 10; i++ { + assertMessageOffset(t, <-c1.Messages(), int64(2000+i)) + } + for i := 1; i < 10; i++ { + assertMessageOffset(t, <-c0.Messages(), int64(1000+i)) + } + + select { + case <-c0.Errors(): + default: + t.Errorf("Partition consumer should have detected broker restart") + } + + safeClose(t, c1) + safeClose(t, c0) + safeClose(t, master) + broker0.Close() + broker1.Close() +} + +func TestConsumerOffsetOutOfRange(t *testing.T) { + // Given + broker0 := NewMockBroker(t, 2) + broker0.SetHandlerByMap(map[string]MockResponse{ + "MetadataRequest": NewMockMetadataResponse(t). + SetBroker(broker0.Addr(), broker0.BrokerID()). + SetLeader("my_topic", 0, broker0.BrokerID()), + "OffsetRequest": NewMockOffsetResponse(t). + SetOffset("my_topic", 0, OffsetNewest, 1234). 
+ SetOffset("my_topic", 0, OffsetOldest, 2345), + }) + + master, err := NewConsumer([]string{broker0.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + // When/Then + if _, err := master.ConsumePartition("my_topic", 0, 0); err != ErrOffsetOutOfRange { + t.Fatal("Should return ErrOffsetOutOfRange, got:", err) + } + if _, err := master.ConsumePartition("my_topic", 0, 3456); err != ErrOffsetOutOfRange { + t.Fatal("Should return ErrOffsetOutOfRange, got:", err) + } + if _, err := master.ConsumePartition("my_topic", 0, -3); err != ErrOffsetOutOfRange { + t.Fatal("Should return ErrOffsetOutOfRange, got:", err) + } + + safeClose(t, master) + broker0.Close() +} + +func assertMessageOffset(t *testing.T, msg *ConsumerMessage, expectedOffset int64) { + if msg.Offset != expectedOffset { + t.Errorf("Incorrect message offset: expected=%d, actual=%d", expectedOffset, msg.Offset) + } +} + +// This example shows how to use the consumer to read messages +// from a single partition. +func ExampleConsumer() { + consumer, err := NewConsumer([]string{"localhost:9092"}, nil) + if err != nil { + panic(err) + } + + defer func() { + if err := consumer.Close(); err != nil { + log.Fatalln(err) + } + }() + + partitionConsumer, err := consumer.ConsumePartition("my_topic", 0, OffsetNewest) + if err != nil { + panic(err) + } + + defer func() { + if err := partitionConsumer.Close(); err != nil { + log.Fatalln(err) + } + }() + + // Trap SIGINT to trigger a shutdown. + signals := make(chan os.Signal, 1) + signal.Notify(signals, os.Interrupt) + + consumed := 0 +ConsumerLoop: + for { + select { + case msg := <-partitionConsumer.Messages(): + log.Printf("Consumed message offset %d\n", msg.Offset) + consumed++ + case <-signals: + break ConsumerLoop + } + } + + log.Printf("Consumed: %d\n", consumed) +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request_test.go b/vendor/github.com/Shopify/sarama/describe_groups_request_test.go new file mode 100644 index 0000000000..7d45f3fee4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_request_test.go @@ -0,0 +1,34 @@ +package sarama + +import "testing" + +var ( + emptyDescribeGroupsRequest = []byte{0, 0, 0, 0} + + singleDescribeGroupsRequest = []byte{ + 0, 0, 0, 1, // 1 group + 0, 3, 'f', 'o', 'o', // group name: foo + } + + doubleDescribeGroupsRequest = []byte{ + 0, 0, 0, 2, // 2 groups + 0, 3, 'f', 'o', 'o', // group name: foo + 0, 3, 'b', 'a', 'r', // group name: foo + } +) + +func TestDescribeGroupsRequest(t *testing.T) { + var request *DescribeGroupsRequest + + request = new(DescribeGroupsRequest) + testRequest(t, "no groups", request, emptyDescribeGroupsRequest) + + request = new(DescribeGroupsRequest) + request.AddGroup("foo") + testRequest(t, "one group", request, singleDescribeGroupsRequest) + + request = new(DescribeGroupsRequest) + request.AddGroup("foo") + request.AddGroup("bar") + testRequest(t, "two groups", request, doubleDescribeGroupsRequest) +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response_test.go b/vendor/github.com/Shopify/sarama/describe_groups_response_test.go new file mode 100644 index 0000000000..dd39731911 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_response_test.go @@ -0,0 +1,91 @@ +package sarama + +import ( + "reflect" + "testing" +) + +var ( + describeGroupsResponseEmpty = []byte{ + 0, 0, 0, 0, // no groups + } + + describeGroupsResponsePopulated = []byte{ + 0, 0, 0, 2, // 2 groups + + 0, 0, // no error + 0, 3, 'f', 'o', 'o', // Group ID + 0, 3, 'b', 'a', 'r', // State + 
0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol type (consumer) + 0, 3, 'b', 'a', 'z', // Protocol name + 0, 0, 0, 1, // 1 member + 0, 2, 'i', 'd', // Member ID + 0, 6, 's', 'a', 'r', 'a', 'm', 'a', // Client ID + 0, 9, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', // Client Host + 0, 0, 0, 3, 0x01, 0x02, 0x03, // MemberMetadata + 0, 0, 0, 3, 0x04, 0x05, 0x06, // MemberAssignment + + 0, 30, // ErrGroupAuthorizationFailed + 0, 0, + 0, 0, + 0, 0, + 0, 0, + 0, 0, 0, 0, + } +) + +func TestDescribeGroupsResponse(t *testing.T) { + var response *DescribeGroupsResponse + + response = new(DescribeGroupsResponse) + testVersionDecodable(t, "empty", response, describeGroupsResponseEmpty, 0) + if len(response.Groups) != 0 { + t.Error("Expected no groups") + } + + response = new(DescribeGroupsResponse) + testVersionDecodable(t, "populated", response, describeGroupsResponsePopulated, 0) + if len(response.Groups) != 2 { + t.Error("Expected two groups") + } + + group0 := response.Groups[0] + if group0.Err != ErrNoError { + t.Error("Unexpected groups[0].Err, found", group0.Err) + } + if group0.GroupId != "foo" { + t.Error("Unexpected groups[0].GroupId, found", group0.GroupId) + } + if group0.State != "bar" { + t.Error("Unexpected groups[0].State, found", group0.State) + } + if group0.ProtocolType != "consumer" { + t.Error("Unexpected groups[0].ProtocolType, found", group0.ProtocolType) + } + if group0.Protocol != "baz" { + t.Error("Unexpected groups[0].Protocol, found", group0.Protocol) + } + if len(group0.Members) != 1 { + t.Error("Unexpected groups[0].Members, found", group0.Members) + } + if group0.Members["id"].ClientId != "sarama" { + t.Error("Unexpected groups[0].Members[id].ClientId, found", group0.Members["id"].ClientId) + } + if group0.Members["id"].ClientHost != "localhost" { + t.Error("Unexpected groups[0].Members[id].ClientHost, found", group0.Members["id"].ClientHost) + } + if !reflect.DeepEqual(group0.Members["id"].MemberMetadata, []byte{0x01, 0x02, 0x03}) { + t.Error("Unexpected groups[0].Members[id].MemberMetadata, found", group0.Members["id"].MemberMetadata) + } + if !reflect.DeepEqual(group0.Members["id"].MemberAssignment, []byte{0x04, 0x05, 0x06}) { + t.Error("Unexpected groups[0].Members[id].MemberAssignment, found", group0.Members["id"].MemberAssignment) + } + + group1 := response.Groups[1] + if group1.Err != ErrGroupAuthorizationFailed { + t.Error("Unexpected groups[1].Err, found", group1.Err) + } + if len(group1.Members) != 0 { + t.Error("Unexpected groups[1].Members, found", group1.Members) + } +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request_test.go b/vendor/github.com/Shopify/sarama/fetch_request_test.go new file mode 100644 index 0000000000..32c083c7d3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_request_test.go @@ -0,0 +1,34 @@ +package sarama + +import "testing" + +var ( + fetchRequestNoBlocks = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + fetchRequestWithProperties = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00, 0x00, 0xEF, + 0x00, 0x00, 0x00, 0x00} + + fetchRequestOneBlock = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x56} +) + +func TestFetchRequest(t *testing.T) { + request := new(FetchRequest) + testRequest(t, "no blocks", request,
fetchRequestNoBlocks) + + request.MaxWaitTime = 0x20 + request.MinBytes = 0xEF + testRequest(t, "with properties", request, fetchRequestWithProperties) + + request.MaxWaitTime = 0 + request.MinBytes = 0 + request.AddBlock("topic", 0x12, 0x34, 0x56) + testRequest(t, "one block", request, fetchRequestOneBlock) +} diff --git a/vendor/github.com/Shopify/sarama/fetch_response_test.go b/vendor/github.com/Shopify/sarama/fetch_response_test.go new file mode 100644 index 0000000000..52fb5a74cb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_response_test.go @@ -0,0 +1,84 @@ +package sarama + +import ( + "bytes" + "testing" +) + +var ( + emptyFetchResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} + + oneMessageFetchResponse = []byte{ + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x05, + 0x00, 0x01, + 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x10, 0x10, + 0x00, 0x00, 0x00, 0x1C, + // messageSet + 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, + // message + 0x23, 0x96, 0x4a, 0xf7, // CRC + 0x00, + 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} +) + +func TestEmptyFetchResponse(t *testing.T) { + response := FetchResponse{} + testVersionDecodable(t, "empty", &response, emptyFetchResponse, 0) + + if len(response.Blocks) != 0 { + t.Error("Decoding produced topic blocks where there were none.") + } + +} + +func TestOneMessageFetchResponse(t *testing.T) { + response := FetchResponse{} + testVersionDecodable(t, "one message", &response, oneMessageFetchResponse, 0) + + if len(response.Blocks) != 1 { + t.Fatal("Decoding produced incorrect number of topic blocks.") + } + + if len(response.Blocks["topic"]) != 1 { + t.Fatal("Decoding produced incorrect number of partition blocks for topic.") + } + + block := response.GetBlock("topic", 5) + if block == nil { + t.Fatal("GetBlock didn't return block.") + } + if block.Err != ErrOffsetOutOfRange { + t.Error("Decoding didn't produce correct error code.") + } + if block.HighWaterMarkOffset != 0x10101010 { + t.Error("Decoding didn't produce correct high water mark offset.") + } + if block.MsgSet.PartialTrailingMessage { + t.Error("Decoding detected a partial trailing message where there wasn't one.") + } + + if len(block.MsgSet.Messages) != 1 { + t.Fatal("Decoding produced incorrect number of messages.") + } + msgBlock := block.MsgSet.Messages[0] + if msgBlock.Offset != 0x550000 { + t.Error("Decoding produced incorrect message offset.") + } + msg := msgBlock.Msg + if msg.Codec != CompressionNone { + t.Error("Decoding produced incorrect message compression.") + } + if msg.Key != nil { + t.Error("Decoding produced message key where there was none.") + } + if !bytes.Equal(msg.Value, []byte{0x00, 0xEE}) { + t.Error("Decoding produced incorrect message value.") + } +} diff --git a/vendor/github.com/Shopify/sarama/functional_client_test.go b/vendor/github.com/Shopify/sarama/functional_client_test.go new file mode 100644 index 0000000000..9e8e32968d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/functional_client_test.go @@ -0,0 +1,90 @@ +package sarama + +import ( + "fmt" + "testing" + "time" +) + +func TestFuncConnectionFailure(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + Proxies["kafka1"].Enabled = false + SaveProxy(t, "kafka1") + + config := NewConfig() + config.Metadata.Retry.Max = 1 + + _, err := NewClient([]string{kafkaBrokers[0]}, config) + if err != ErrOutOfBrokers { + t.Fatal("Expected returned error to be 
ErrOutOfBrokers, but was: ", err) + } +} + +func TestFuncClientMetadata(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + config := NewConfig() + config.Metadata.Retry.Max = 1 + config.Metadata.Retry.Backoff = 10 * time.Millisecond + client, err := NewClient(kafkaBrokers, config) + if err != nil { + t.Fatal(err) + } + + if err := client.RefreshMetadata("unknown_topic"); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + if _, err := client.Leader("unknown_topic", 0); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + if _, err := client.Replicas("invalid/topic", 0); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, got", err) + } + + partitions, err := client.Partitions("test.4") + if err != nil { + t.Error(err) + } + if len(partitions) != 4 { + t.Errorf("Expected test.4 topic to have 4 partitions, found %v", partitions) + } + + partitions, err = client.Partitions("test.1") + if err != nil { + t.Error(err) + } + if len(partitions) != 1 { + t.Errorf("Expected test.1 topic to have 1 partition, found %v", partitions) + } + + safeClose(t, client) +} + +func TestFuncClientCoordinator(t *testing.T) { + checkKafkaVersion(t, "0.8.2") + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + client, err := NewClient(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + broker, err := client.Coordinator(fmt.Sprintf("another_new_consumer_group_%d", i)) + if err != nil { + t.Error(err) + } + + if connected, err := broker.Connected(); !connected || err != nil { + t.Errorf("Expected coordinator broker %s to be properly connected.", broker.Addr()) + } + } + + safeClose(t, client) +} diff --git a/vendor/github.com/Shopify/sarama/functional_consumer_test.go b/vendor/github.com/Shopify/sarama/functional_consumer_test.go new file mode 100644 index 0000000000..ab84331091 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/functional_consumer_test.go @@ -0,0 +1,61 @@ +package sarama + +import ( + "math" + "testing" +) + +func TestFuncConsumerOffsetOutOfRange(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + consumer, err := NewConsumer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + if _, err := consumer.ConsumePartition("test.1", 0, -10); err != ErrOffsetOutOfRange { + t.Error("Expected ErrOffsetOutOfRange, got:", err) + } + + if _, err := consumer.ConsumePartition("test.1", 0, math.MaxInt64); err != ErrOffsetOutOfRange { + t.Error("Expected ErrOffsetOutOfRange, got:", err) + } + + safeClose(t, consumer) +} + +func TestConsumerHighWaterMarkOffset(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + p, err := NewSyncProducer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + defer safeClose(t, p) + + _, offset, err := p.SendMessage(&ProducerMessage{Topic: "test.1", Value: StringEncoder("Test")}) + if err != nil { + t.Fatal(err) + } + + c, err := NewConsumer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + defer safeClose(t, c) + + pc, err := c.ConsumePartition("test.1", 0, OffsetOldest) + if err != nil { + t.Fatal(err) + } + + <-pc.Messages() + + if hwmo := pc.HighWaterMarkOffset(); hwmo != offset+1 { + t.Logf("Last produced offset %d; high water mark should be one higher but found %d.", offset, hwmo) + } + + safeClose(t, pc) +} diff --git a/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go
b/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go new file mode 100644 index 0000000000..436f35ef42 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/functional_offset_manager_test.go @@ -0,0 +1,47 @@ +package sarama + +import ( + "testing" +) + +func TestFuncOffsetManager(t *testing.T) { + checkKafkaVersion(t, "0.8.2") + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + client, err := NewClient(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + offsetManager, err := NewOffsetManagerFromClient("sarama.TestFuncOffsetManager", client) + if err != nil { + t.Fatal(err) + } + + pom1, err := offsetManager.ManagePartition("test.1", 0) + if err != nil { + t.Fatal(err) + } + + pom1.MarkOffset(10, "test metadata") + safeClose(t, pom1) + + pom2, err := offsetManager.ManagePartition("test.1", 0) + if err != nil { + t.Fatal(err) + } + + offset, metadata := pom2.NextOffset() + + if offset != 10 { + t.Errorf("Expected the next offset to be 10, found %d.", offset) + } + if metadata != "test metadata" { + t.Errorf("Expected metadata to be 'test metadata', found %s.", metadata) + } + + safeClose(t, pom2) + safeClose(t, offsetManager) + safeClose(t, client) +} diff --git a/vendor/github.com/Shopify/sarama/functional_producer_test.go b/vendor/github.com/Shopify/sarama/functional_producer_test.go new file mode 100644 index 0000000000..1504e76005 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/functional_producer_test.go @@ -0,0 +1,203 @@ +package sarama + +import ( + "fmt" + "sync" + "testing" + "time" +) + +const TestBatchSize = 1000 + +func TestFuncProducing(t *testing.T) { + config := NewConfig() + testProducingMessages(t, config) +} + +func TestFuncProducingGzip(t *testing.T) { + config := NewConfig() + config.Producer.Compression = CompressionGZIP + testProducingMessages(t, config) +} + +func TestFuncProducingSnappy(t *testing.T) { + config := NewConfig() + config.Producer.Compression = CompressionSnappy + testProducingMessages(t, config) +} + +func TestFuncProducingNoResponse(t *testing.T) { + config := NewConfig() + config.Producer.RequiredAcks = NoResponse + testProducingMessages(t, config) +} + +func TestFuncProducingFlushing(t *testing.T) { + config := NewConfig() + config.Producer.Flush.Messages = TestBatchSize / 8 + config.Producer.Flush.Frequency = 250 * time.Millisecond + testProducingMessages(t, config) +} + +func TestFuncMultiPartitionProduce(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + config := NewConfig() + config.ChannelBufferSize = 20 + config.Producer.Flush.Frequency = 50 * time.Millisecond + config.Producer.Flush.Messages = 200 + config.Producer.Return.Successes = true + producer, err := NewSyncProducer(kafkaBrokers, config) + if err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + wg.Add(TestBatchSize) + + for i := 1; i <= TestBatchSize; i++ { + go func(i int) { + defer wg.Done() + msg := &ProducerMessage{Topic: "test.64", Key: nil, Value: StringEncoder(fmt.Sprintf("hur %d", i))} + if _, _, err := producer.SendMessage(msg); err != nil { + t.Error(i, err) + } + }(i) + } + + wg.Wait() + if err := producer.Close(); err != nil { + t.Error(err) + } +} + +func TestFuncProducingToInvalidTopic(t *testing.T) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + producer, err := NewSyncProducer(kafkaBrokers, nil) + if err != nil { + t.Fatal(err) + } + + if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { + t.Error("Expected 
ErrUnknownTopicOrPartition, found", err) + } + + if _, _, err := producer.SendMessage(&ProducerMessage{Topic: "in/valid"}); err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found", err) + } + + safeClose(t, producer) +} + +func testProducingMessages(t *testing.T, config *Config) { + setupFunctionalTest(t) + defer teardownFunctionalTest(t) + + config.Producer.Return.Successes = true + config.Consumer.Return.Errors = true + + client, err := NewClient(kafkaBrokers, config) + if err != nil { + t.Fatal(err) + } + + master, err := NewConsumerFromClient(client) + if err != nil { + t.Fatal(err) + } + consumer, err := master.ConsumePartition("test.1", 0, OffsetNewest) + if err != nil { + t.Fatal(err) + } + + producer, err := NewAsyncProducerFromClient(client) + if err != nil { + t.Fatal(err) + } + + expectedResponses := TestBatchSize + for i := 1; i <= TestBatchSize; { + msg := &ProducerMessage{Topic: "test.1", Key: nil, Value: StringEncoder(fmt.Sprintf("testing %d", i))} + select { + case producer.Input() <- msg: + i++ + case ret := <-producer.Errors(): + t.Fatal(ret.Err) + case <-producer.Successes(): + expectedResponses-- + } + } + for expectedResponses > 0 { + select { + case ret := <-producer.Errors(): + t.Fatal(ret.Err) + case <-producer.Successes(): + expectedResponses-- + } + } + safeClose(t, producer) + + for i := 1; i <= TestBatchSize; i++ { + select { + case <-time.After(10 * time.Second): + t.Fatal("Not received any more events in the last 10 seconds.") + + case err := <-consumer.Errors(): + t.Error(err) + + case message := <-consumer.Messages(): + if string(message.Value) != fmt.Sprintf("testing %d", i) { + t.Fatalf("Unexpected message with index %d: %s", i, message.Value) + } + } + + } + safeClose(t, consumer) + safeClose(t, client) +} + +// Benchmarks + +func BenchmarkProducerSmall(b *testing.B) { + benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 128))) +} +func BenchmarkProducerMedium(b *testing.B) { + benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 1024))) +} +func BenchmarkProducerLarge(b *testing.B) { + benchmarkProducer(b, nil, "test.64", ByteEncoder(make([]byte, 8192))) +} +func BenchmarkProducerSmallSinglePartition(b *testing.B) { + benchmarkProducer(b, nil, "test.1", ByteEncoder(make([]byte, 128))) +} +func BenchmarkProducerMediumSnappy(b *testing.B) { + conf := NewConfig() + conf.Producer.Compression = CompressionSnappy + benchmarkProducer(b, conf, "test.1", ByteEncoder(make([]byte, 1024))) +} + +func benchmarkProducer(b *testing.B, conf *Config, topic string, value Encoder) { + setupFunctionalTest(b) + defer teardownFunctionalTest(b) + + producer, err := NewAsyncProducer(kafkaBrokers, conf) + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + + for i := 1; i <= b.N; { + msg := &ProducerMessage{Topic: topic, Key: StringEncoder(fmt.Sprintf("%d", i)), Value: value} + select { + case producer.Input() <- msg: + i++ + case ret := <-producer.Errors(): + b.Fatal(ret.Err) + } + } + safeClose(b, producer) +} diff --git a/vendor/github.com/Shopify/sarama/functional_test.go b/vendor/github.com/Shopify/sarama/functional_test.go new file mode 100644 index 0000000000..846eb29f9f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/functional_test.go @@ -0,0 +1,148 @@ +package sarama + +import ( + "log" + "math/rand" + "net" + "os" + "strconv" + "strings" + "testing" + "time" + + toxiproxy "github.com/Shopify/toxiproxy/client" +) + +const ( + VagrantToxiproxy = "http://192.168.100.67:8474" + VagrantKafkaPeers = 
"192.168.100.67:9091,192.168.100.67:9092,192.168.100.67:9093,192.168.100.67:9094,192.168.100.67:9095" + VagrantZookeeperPeers = "192.168.100.67:2181,192.168.100.67:2182,192.168.100.67:2183,192.168.100.67:2184,192.168.100.67:2185" +) + +var ( + kafkaAvailable, kafkaRequired bool + kafkaBrokers []string + + proxyClient *toxiproxy.Client + Proxies map[string]*toxiproxy.Proxy + ZKProxies = []string{"zk1", "zk2", "zk3", "zk4", "zk5"} + KafkaProxies = []string{"kafka1", "kafka2", "kafka3", "kafka4", "kafka5"} +) + +func init() { + if os.Getenv("DEBUG") == "true" { + Logger = log.New(os.Stdout, "[sarama] ", log.LstdFlags) + } + + seed := time.Now().UTC().UnixNano() + if tmp := os.Getenv("TEST_SEED"); tmp != "" { + seed, _ = strconv.ParseInt(tmp, 0, 64) + } + Logger.Println("Using random seed:", seed) + rand.Seed(seed) + + proxyAddr := os.Getenv("TOXIPROXY_ADDR") + if proxyAddr == "" { + proxyAddr = VagrantToxiproxy + } + proxyClient = toxiproxy.NewClient(proxyAddr) + + kafkaPeers := os.Getenv("KAFKA_PEERS") + if kafkaPeers == "" { + kafkaPeers = VagrantKafkaPeers + } + kafkaBrokers = strings.Split(kafkaPeers, ",") + + if c, err := net.DialTimeout("tcp", kafkaBrokers[0], 5*time.Second); err == nil { + if err = c.Close(); err == nil { + kafkaAvailable = true + } + } + + kafkaRequired = os.Getenv("CI") != "" +} + +func checkKafkaAvailability(t testing.TB) { + if !kafkaAvailable { + if kafkaRequired { + t.Fatalf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) + } else { + t.Skipf("Kafka broker is not available on %s. Set KAFKA_PEERS to connect to Kafka on a different location.", kafkaBrokers[0]) + } + } +} + +func checkKafkaVersion(t testing.TB, requiredVersion string) { + kafkaVersion := os.Getenv("KAFKA_VERSION") + if kafkaVersion == "" { + t.Logf("No KAFKA_VERSION set. This test requires Kafka version %s or higher. Continuing...", requiredVersion) + } else { + available := parseKafkaVersion(kafkaVersion) + required := parseKafkaVersion(requiredVersion) + if !available.satisfies(required) { + t.Skipf("Kafka version %s is required for this test; you have %s. 
Skipping...", requiredVersion, kafkaVersion) + } + } +} + +func resetProxies(t testing.TB) { + if err := proxyClient.ResetState(); err != nil { + t.Error(err) + } + Proxies = nil +} + +func fetchProxies(t testing.TB) { + var err error + Proxies, err = proxyClient.Proxies() + if err != nil { + t.Fatal(err) + } +} + +func SaveProxy(t *testing.T, px string) { + if err := Proxies[px].Save(); err != nil { + t.Fatal(err) + } +} + +func setupFunctionalTest(t testing.TB) { + checkKafkaAvailability(t) + resetProxies(t) + fetchProxies(t) +} + +func teardownFunctionalTest(t testing.TB) { + resetProxies(t) +} + +type kafkaVersion []int + +func (kv kafkaVersion) satisfies(other kafkaVersion) bool { + var ov int + for index, v := range kv { + if len(other) <= index { + ov = 0 + } else { + ov = other[index] + } + + if v < ov { + return false + } else if v > ov { + return true + } + } + return true +} + +func parseKafkaVersion(version string) kafkaVersion { + numbers := strings.Split(version, ".") + result := make(kafkaVersion, 0, len(numbers)) + for _, number := range numbers { + nr, _ := strconv.Atoi(number) + result = append(result, nr) + } + + return result +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request_test.go b/vendor/github.com/Shopify/sarama/heartbeat_request_test.go new file mode 100644 index 0000000000..da6cd18f5a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_request_test.go @@ -0,0 +1,21 @@ +package sarama + +import "testing" + +var ( + basicHeartbeatRequest = []byte{ + 0, 3, 'f', 'o', 'o', // Group ID + 0x00, 0x01, 0x02, 0x03, // Generatiuon ID + 0, 3, 'b', 'a', 'z', // Member ID + } +) + +func TestHeartbeatRequest(t *testing.T) { + var request *HeartbeatRequest + + request = new(HeartbeatRequest) + request.GroupId = "foo" + request.GenerationId = 66051 + request.MemberId = "baz" + testRequest(t, "basic", request, basicHeartbeatRequest) +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response_test.go b/vendor/github.com/Shopify/sarama/heartbeat_response_test.go new file mode 100644 index 0000000000..5bcbec985f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_response_test.go @@ -0,0 +1,18 @@ +package sarama + +import "testing" + +var ( + heartbeatResponseNoError = []byte{ + 0x00, 0x00} +) + +func TestHeartbeatResponse(t *testing.T) { + var response *HeartbeatResponse + + response = new(HeartbeatResponse) + testVersionDecodable(t, "no error", response, heartbeatResponseNoError, 0) + if response.Err != ErrNoError { + t.Error("Decoding error failed: no error expected but found", response.Err) + } +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request_test.go b/vendor/github.com/Shopify/sarama/join_group_request_test.go new file mode 100644 index 0000000000..8a6448c0e1 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_request_test.go @@ -0,0 +1,41 @@ +package sarama + +import "testing" + +var ( + joinGroupRequestNoProtocols = []byte{ + 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID + 0, 0, 0, 100, // Session timeout + 0, 0, // Member ID + 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type + 0, 0, 0, 0, // 0 protocol groups + } + + joinGroupRequestOneProtocol = []byte{ + 0, 9, 'T', 'e', 's', 't', 'G', 'r', 'o', 'u', 'p', // Group ID + 0, 0, 0, 100, // Session timeout + 0, 11, 'O', 'n', 'e', 'P', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Member ID + 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // Protocol Type + 0, 0, 0, 1, // 1 group protocol + 0, 3, 'o', 'n', 'e', // Protocol name + 0, 
0, 0, 3, 0x01, 0x02, 0x03, // protocol metadata + } +) + +func TestJoinGroupRequest(t *testing.T) { + var request *JoinGroupRequest + + request = new(JoinGroupRequest) + request.GroupId = "TestGroup" + request.SessionTimeout = 100 + request.ProtocolType = "consumer" + testRequest(t, "no protocols", request, joinGroupRequestNoProtocols) + + request = new(JoinGroupRequest) + request.GroupId = "TestGroup" + request.SessionTimeout = 100 + request.MemberId = "OneProtocol" + request.ProtocolType = "consumer" + request.AddGroupProtocol("one", []byte{0x01, 0x02, 0x03}) + testRequest(t, "one protocol", request, joinGroupRequestOneProtocol) +} diff --git a/vendor/github.com/Shopify/sarama/join_group_response_test.go b/vendor/github.com/Shopify/sarama/join_group_response_test.go new file mode 100644 index 0000000000..ba7f71f208 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_response_test.go @@ -0,0 +1,98 @@ +package sarama + +import ( + "reflect" + "testing" +) + +var ( + joinGroupResponseNoError = []byte{ + 0x00, 0x00, // No error + 0x00, 0x01, 0x02, 0x03, // Generation ID + 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen + 0, 3, 'f', 'o', 'o', // Leader ID + 0, 3, 'b', 'a', 'r', // Member ID + 0, 0, 0, 0, // No member info + } + + joinGroupResponseWithError = []byte{ + 0, 23, // Error: inconsistent group protocol + 0x00, 0x00, 0x00, 0x00, // Generation ID + 0, 0, // Protocol name chosen + 0, 0, // Leader ID + 0, 0, // Member ID + 0, 0, 0, 0, // No member info + } + + joinGroupResponseLeader = []byte{ + 0x00, 0x00, // No error + 0x00, 0x01, 0x02, 0x03, // Generation ID + 0, 8, 'p', 'r', 'o', 't', 'o', 'c', 'o', 'l', // Protocol name chosen + 0, 3, 'f', 'o', 'o', // Leader ID + 0, 3, 'f', 'o', 'o', // Member ID == Leader ID + 0, 0, 0, 1, // 1 member + 0, 3, 'f', 'o', 'o', // Member ID + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member metadata + } +) + +func TestJoinGroupResponse(t *testing.T) { + var response *JoinGroupResponse + + response = new(JoinGroupResponse) + testVersionDecodable(t, "no error", response, joinGroupResponseNoError, 0) + if response.Err != ErrNoError { + t.Error("Decoding Err failed: no error expected but found", response.Err) + } + if response.GenerationId != 66051 { + t.Error("Decoding GenerationId failed, found:", response.GenerationId) + } + if response.LeaderId != "foo" { + t.Error("Decoding LeaderId failed, found:", response.LeaderId) + } + if response.MemberId != "bar" { + t.Error("Decoding MemberId failed, found:", response.MemberId) + } + if len(response.Members) != 0 { + t.Error("Decoding Members failed, found:", response.Members) + } + + response = new(JoinGroupResponse) + testVersionDecodable(t, "with error", response, joinGroupResponseWithError, 0) + if response.Err != ErrInconsistentGroupProtocol { + t.Error("Decoding Err failed: ErrInconsistentGroupProtocol expected but found", response.Err) + } + if response.GenerationId != 0 { + t.Error("Decoding GenerationId failed, found:", response.GenerationId) + } + if response.LeaderId != "" { + t.Error("Decoding LeaderId failed, found:", response.LeaderId) + } + if response.MemberId != "" { + t.Error("Decoding MemberId failed, found:", response.MemberId) + } + if len(response.Members) != 0 { + t.Error("Decoding Members failed, found:", response.Members) + } + + response = new(JoinGroupResponse) + testVersionDecodable(t, "leader", response, joinGroupResponseLeader, 0) + if response.Err != ErrNoError { + t.Error("Decoding Err failed: ErrNoError expected but found", response.Err) + } + if
response.GenerationId != 66051 { + t.Error("Decoding GenerationId failed, found:", response.GenerationId) + } + if response.LeaderId != "foo" { + t.Error("Decoding LeaderId failed, found:", response.LeaderId) + } + if response.MemberId != "foo" { + t.Error("Decoding MemberId failed, found:", response.MemberId) + } + if len(response.Members) != 1 { + t.Error("Decoding Members failed, found:", response.Members) + } + if !reflect.DeepEqual(response.Members["foo"], []byte{0x01, 0x02, 0x03}) { + t.Error("Decoding foo member failed, found:", response.Members["foo"]) + } +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request_test.go b/vendor/github.com/Shopify/sarama/leave_group_request_test.go new file mode 100644 index 0000000000..c1fed6d25f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_request_test.go @@ -0,0 +1,19 @@ +package sarama + +import "testing" + +var ( + basicLeaveGroupRequest = []byte{ + 0, 3, 'f', 'o', 'o', + 0, 3, 'b', 'a', 'r', + } +) + +func TestLeaveGroupRequest(t *testing.T) { + var request *LeaveGroupRequest + + request = new(LeaveGroupRequest) + request.GroupId = "foo" + request.MemberId = "bar" + testRequest(t, "basic", request, basicLeaveGroupRequest) +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response_test.go b/vendor/github.com/Shopify/sarama/leave_group_response_test.go new file mode 100644 index 0000000000..9207c66681 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_response_test.go @@ -0,0 +1,24 @@ +package sarama + +import "testing" + +var ( + leaveGroupResponseNoError = []byte{0x00, 0x00} + leaveGroupResponseWithError = []byte{0, 25} +) + +func TestLeaveGroupResponse(t *testing.T) { + var response *LeaveGroupResponse + + response = new(LeaveGroupResponse) + testVersionDecodable(t, "no error", response, leaveGroupResponseNoError, 0) + if response.Err != ErrNoError { + t.Error("Decoding error failed: no error expected but found", response.Err) + } + + response = new(LeaveGroupResponse) + testVersionDecodable(t, "with error", response, leaveGroupResponseWithError, 0) + if response.Err != ErrUnknownMemberId { + t.Error("Decoding error failed: ErrUnknownMemberId expected but found", response.Err) + } +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request_test.go b/vendor/github.com/Shopify/sarama/list_groups_request_test.go new file mode 100644 index 0000000000..2e977d9a58 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_request_test.go @@ -0,0 +1,7 @@ +package sarama + +import "testing" + +func TestListGroupsRequest(t *testing.T) { + testRequest(t, "ListGroupsRequest", &ListGroupsRequest{}, []byte{}) +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response_test.go b/vendor/github.com/Shopify/sarama/list_groups_response_test.go new file mode 100644 index 0000000000..41ab822f9f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_response_test.go @@ -0,0 +1,58 @@ +package sarama + +import ( + "testing" +) + +var ( + listGroupsResponseEmpty = []byte{ + 0, 0, // no error + 0, 0, 0, 0, // no groups + } + + listGroupsResponseError = []byte{ + 0, 31, // ErrClusterAuthorizationFailed + 0, 0, 0, 0, // no groups + } + + listGroupsResponseWithConsumer = []byte{ + 0, 0, // no error + 0, 0, 0, 1, // 1 group + 0, 3, 'f', 'o', 'o', // group name + 0, 8, 'c', 'o', 'n', 's', 'u', 'm', 'e', 'r', // protocol type + } +) + +func TestListGroupsResponse(t *testing.T) { + var response *ListGroupsResponse + + response = new(ListGroupsResponse) + testVersionDecodable(t, "no
error", response, listGroupsResponseEmpty, 0) + if response.Err != ErrNoError { + t.Error("Expected no gerror, found:", response.Err) + } + if len(response.Groups) != 0 { + t.Error("Expected no groups") + } + + response = new(ListGroupsResponse) + testVersionDecodable(t, "no error", response, listGroupsResponseError, 0) + if response.Err != ErrClusterAuthorizationFailed { + t.Error("Expected no gerror, found:", response.Err) + } + if len(response.Groups) != 0 { + t.Error("Expected no groups") + } + + response = new(ListGroupsResponse) + testVersionDecodable(t, "no error", response, listGroupsResponseWithConsumer, 0) + if response.Err != ErrNoError { + t.Error("Expected no gerror, found:", response.Err) + } + if len(response.Groups) != 1 { + t.Error("Expected one group") + } + if response.Groups["foo"] != "consumer" { + t.Error("Expected foo group to use consumer protocol") + } +} diff --git a/vendor/github.com/Shopify/sarama/message_test.go b/vendor/github.com/Shopify/sarama/message_test.go new file mode 100644 index 0000000000..1dae896fee --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message_test.go @@ -0,0 +1,113 @@ +package sarama + +import "testing" + +var ( + emptyMessage = []byte{ + 167, 236, 104, 3, // CRC + 0x00, // magic version byte + 0x00, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + 0xFF, 0xFF, 0xFF, 0xFF} // value + + emptyGzipMessage = []byte{ + 97, 79, 149, 90, //CRC + 0x00, // magic version byte + 0x01, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + // value + 0x00, 0x00, 0x00, 0x17, + 0x1f, 0x8b, + 0x08, + 0, 0, 9, 110, 136, 0, 255, 1, 0, 0, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0} + + emptyBulkSnappyMessage = []byte{ + 180, 47, 53, 209, //CRC + 0x00, // magic version byte + 0x02, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + 0, 0, 0, 42, + 130, 83, 78, 65, 80, 80, 89, 0, // SNAPPY magic + 0, 0, 0, 1, // min version + 0, 0, 0, 1, // default version + 0, 0, 0, 22, 52, 0, 0, 25, 1, 16, 14, 227, 138, 104, 118, 25, 15, 13, 1, 8, 1, 0, 0, 62, 26, 0} + + emptyBulkGzipMessage = []byte{ + 139, 160, 63, 141, //CRC + 0x00, // magic version byte + 0x01, // attribute flags + 0xFF, 0xFF, 0xFF, 0xFF, // key + 0x00, 0x00, 0x00, 0x27, // len + 0x1f, 0x8b, // Gzip Magic + 0x08, // deflate compressed + 0, 0, 0, 0, 0, 0, 0, 99, 96, 128, 3, 190, 202, 112, 143, 7, 12, 12, 255, 129, 0, 33, 200, 192, 136, 41, 3, 0, 199, 226, 155, 70, 52, 0, 0, 0} +) + +func TestMessageEncoding(t *testing.T) { + message := Message{} + testEncodable(t, "empty", &message, emptyMessage) + + message.Value = []byte{} + message.Codec = CompressionGZIP + testEncodable(t, "empty gzip", &message, emptyGzipMessage) +} + +func TestMessageDecoding(t *testing.T) { + message := Message{} + testDecodable(t, "empty", &message, emptyMessage) + if message.Codec != CompressionNone { + t.Error("Decoding produced compression codec where there was none.") + } + if message.Key != nil { + t.Error("Decoding produced key where there was none.") + } + if message.Value != nil { + t.Error("Decoding produced value where there was none.") + } + if message.Set != nil { + t.Error("Decoding produced set where there was none.") + } + + testDecodable(t, "empty gzip", &message, emptyGzipMessage) + if message.Codec != CompressionGZIP { + t.Error("Decoding produced incorrect compression codec (was gzip).") + } + if message.Key != nil { + t.Error("Decoding produced key where there was none.") + } + if message.Value == nil || len(message.Value) != 0 { + t.Error("Decoding produced nil or content-ful value where there was an empty 
array.") + } +} + +func TestMessageDecodingBulkSnappy(t *testing.T) { + message := Message{} + testDecodable(t, "bulk snappy", &message, emptyBulkSnappyMessage) + if message.Codec != CompressionSnappy { + t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionSnappy) + } + if message.Key != nil { + t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) + } + if message.Set == nil { + t.Error("Decoding produced no set, but one was expected.") + } else if len(message.Set.Messages) != 2 { + t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) + } +} + +func TestMessageDecodingBulkGzip(t *testing.T) { + message := Message{} + testDecodable(t, "bulk gzip", &message, emptyBulkGzipMessage) + if message.Codec != CompressionGZIP { + t.Errorf("Decoding produced codec %d, but expected %d.", message.Codec, CompressionGZIP) + } + if message.Key != nil { + t.Errorf("Decoding produced key %+v, but none was expected.", message.Key) + } + if message.Set == nil { + t.Error("Decoding produced no set, but one was expected.") + } else if len(message.Set.Messages) != 2 { + t.Errorf("Decoding produced a set with %d messages, but 2 were expected.", len(message.Set.Messages)) + } +} diff --git a/vendor/github.com/Shopify/sarama/metadata_request_test.go b/vendor/github.com/Shopify/sarama/metadata_request_test.go new file mode 100644 index 0000000000..44f3146e4a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_request_test.go @@ -0,0 +1,29 @@ +package sarama + +import "testing" + +var ( + metadataRequestNoTopics = []byte{ + 0x00, 0x00, 0x00, 0x00} + + metadataRequestOneTopic = []byte{ + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x06, 't', 'o', 'p', 'i', 'c', '1'} + + metadataRequestThreeTopics = []byte{ + 0x00, 0x00, 0x00, 0x03, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x03, 'b', 'a', 'r', + 0x00, 0x03, 'b', 'a', 'z'} +) + +func TestMetadataRequest(t *testing.T) { + request := new(MetadataRequest) + testRequest(t, "no topics", request, metadataRequestNoTopics) + + request.Topics = []string{"topic1"} + testRequest(t, "one topic", request, metadataRequestOneTopic) + + request.Topics = []string{"foo", "bar", "baz"} + testRequest(t, "three topics", request, metadataRequestThreeTopics) +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response_test.go b/vendor/github.com/Shopify/sarama/metadata_response_test.go new file mode 100644 index 0000000000..ea62a4f1b3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_response_test.go @@ -0,0 +1,139 @@ +package sarama + +import "testing" + +var ( + emptyMetadataResponse = []byte{ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + brokersNoTopicsMetadataResponse = []byte{ + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x00, 0xab, 0xff, + 0x00, 0x09, 'l', 'o', 'c', 'a', 'l', 'h', 'o', 's', 't', + 0x00, 0x00, 0x00, 0x33, + + 0x00, 0x01, 0x02, 0x03, + 0x00, 0x0a, 'g', 'o', 'o', 'g', 'l', 'e', '.', 'c', 'o', 'm', + 0x00, 0x00, 0x01, 0x11, + + 0x00, 0x00, 0x00, 0x00} + + topicsNoBrokersMetadataResponse = []byte{ + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x00, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x04, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x07, + 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x00, + 0x00, 0x03, 'b', 'a', 'r', + 0x00, 0x00, 0x00, 0x00} +) + +func TestEmptyMetadataResponse(t *testing.T) { + response := MetadataResponse{} + + 
testVersionDecodable(t, "empty", &response, emptyMetadataResponse, 0) + if len(response.Brokers) != 0 { + t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") + } + if len(response.Topics) != 0 { + t.Error("Decoding produced", len(response.Topics), "topics where there were none!") + } +} + +func TestMetadataResponseWithBrokers(t *testing.T) { + response := MetadataResponse{} + + testVersionDecodable(t, "brokers, no topics", &response, brokersNoTopicsMetadataResponse, 0) + if len(response.Brokers) != 2 { + t.Fatal("Decoding produced", len(response.Brokers), "brokers where there were two!") + } + + if response.Brokers[0].id != 0xabff { + t.Error("Decoding produced invalid broker 0 id.") + } + if response.Brokers[0].addr != "localhost:51" { + t.Error("Decoding produced invalid broker 0 address.") + } + if response.Brokers[1].id != 0x010203 { + t.Error("Decoding produced invalid broker 1 id.") + } + if response.Brokers[1].addr != "google.com:273" { + t.Error("Decoding produced invalid broker 1 address.") + } + + if len(response.Topics) != 0 { + t.Error("Decoding produced", len(response.Topics), "topics where there were none!") + } +} + +func TestMetadataResponseWithTopics(t *testing.T) { + response := MetadataResponse{} + + testVersionDecodable(t, "topics, no brokers", &response, topicsNoBrokersMetadataResponse, 0) + if len(response.Brokers) != 0 { + t.Error("Decoding produced", len(response.Brokers), "brokers where there were none!") + } + + if len(response.Topics) != 2 { + t.Fatal("Decoding produced", len(response.Topics), "topics where there were two!") + } + + if response.Topics[0].Err != ErrNoError { + t.Error("Decoding produced invalid topic 0 error.") + } + + if response.Topics[0].Name != "foo" { + t.Error("Decoding produced invalid topic 0 name.") + } + + if len(response.Topics[0].Partitions) != 1 { + t.Fatal("Decoding produced invalid partition count for topic 0.") + } + + if response.Topics[0].Partitions[0].Err != ErrInvalidMessageSize { + t.Error("Decoding produced invalid topic 0 partition 0 error.") + } + + if response.Topics[0].Partitions[0].ID != 0x01 { + t.Error("Decoding produced invalid topic 0 partition 0 id.") + } + + if response.Topics[0].Partitions[0].Leader != 0x07 { + t.Error("Decoding produced invalid topic 0 partition 0 leader.") + } + + if len(response.Topics[0].Partitions[0].Replicas) != 3 { + t.Fatal("Decoding produced invalid topic 0 partition 0 replicas.") + } + for i := 0; i < 3; i++ { + if response.Topics[0].Partitions[0].Replicas[i] != int32(i+1) { + t.Error("Decoding produced invalid topic 0 partition 0 replica", i) + } + } + + if len(response.Topics[0].Partitions[0].Isr) != 0 { + t.Error("Decoding produced invalid topic 0 partition 0 isr length.") + } + + if response.Topics[1].Err != ErrNoError { + t.Error("Decoding produced invalid topic 1 error.") + } + + if response.Topics[1].Name != "bar" { + t.Error("Decoding produced invalid topic 0 name.") + } + + if len(response.Topics[1].Partitions) != 0 { + t.Error("Decoding produced invalid partition count for topic 1.") + } +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request_test.go b/vendor/github.com/Shopify/sarama/offset_commit_request_test.go new file mode 100644 index 0000000000..afc25b7b38 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_request_test.go @@ -0,0 +1,90 @@ +package sarama + +import "testing" + +var ( + offsetCommitRequestNoBlocksV0 = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', + 0x00, 0x00, 0x00, 0x00} + + 
offsetCommitRequestNoBlocksV1 = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', + 0x00, 0x00, 0x11, 0x22, + 0x00, 0x04, 'c', 'o', 'n', 's', + 0x00, 0x00, 0x00, 0x00} + + offsetCommitRequestNoBlocksV2 = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', + 0x00, 0x00, 0x11, 0x22, + 0x00, 0x04, 'c', 'o', 'n', 's', + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, + 0x00, 0x00, 0x00, 0x00} + + offsetCommitRequestOneBlockV0 = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x52, 0x21, + 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, + 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} + + offsetCommitRequestOneBlockV1 = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', + 0x00, 0x00, 0x11, 0x22, + 0x00, 0x04, 'c', 'o', 'n', 's', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x52, 0x21, + 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} + + offsetCommitRequestOneBlockV2 = []byte{ + 0x00, 0x06, 'f', 'o', 'o', 'b', 'a', 'r', + 0x00, 0x00, 0x11, 0x22, + 0x00, 0x04, 'c', 'o', 'n', 's', + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x33, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x52, 0x21, + 0x00, 0x00, 0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF, + 0x00, 0x08, 'm', 'e', 't', 'a', 'd', 'a', 't', 'a'} +) + +func TestOffsetCommitRequestV0(t *testing.T) { + request := new(OffsetCommitRequest) + request.Version = 0 + request.ConsumerGroup = "foobar" + testRequest(t, "no blocks v0", request, offsetCommitRequestNoBlocksV0) + + request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") + testRequest(t, "one block v0", request, offsetCommitRequestOneBlockV0) +} + +func TestOffsetCommitRequestV1(t *testing.T) { + request := new(OffsetCommitRequest) + request.ConsumerGroup = "foobar" + request.ConsumerID = "cons" + request.ConsumerGroupGeneration = 0x1122 + request.Version = 1 + testRequest(t, "no blocks v1", request, offsetCommitRequestNoBlocksV1) + + request.AddBlock("topic", 0x5221, 0xDEADBEEF, ReceiveTime, "metadata") + testRequest(t, "one block v1", request, offsetCommitRequestOneBlockV1) +} + +func TestOffsetCommitRequestV2(t *testing.T) { + request := new(OffsetCommitRequest) + request.ConsumerGroup = "foobar" + request.ConsumerID = "cons" + request.ConsumerGroupGeneration = 0x1122 + request.RetentionTime = 0x4433 + request.Version = 2 + testRequest(t, "no blocks v2", request, offsetCommitRequestNoBlocksV2) + + request.AddBlock("topic", 0x5221, 0xDEADBEEF, 0, "metadata") + testRequest(t, "one block v2", request, offsetCommitRequestOneBlockV2) +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response_test.go b/vendor/github.com/Shopify/sarama/offset_commit_response_test.go new file mode 100644 index 0000000000..074ec92322 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_response_test.go @@ -0,0 +1,24 @@ +package sarama + +import ( + "testing" +) + +var ( + emptyOffsetCommitResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} +) + +func TestEmptyOffsetCommitResponse(t *testing.T) { + response := OffsetCommitResponse{} + testResponse(t, "empty", &response, emptyOffsetCommitResponse) +} + +func TestNormalOffsetCommitResponse(t *testing.T) { + response := OffsetCommitResponse{} + response.AddError("t", 0, ErrNotLeaderForPartition) + response.Errors["m"] = 
make(map[int32]KError) + // The response encoded form cannot be checked for it varies due to + // unpredictable map traversal order. + testResponse(t, "normal", &response, nil) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go new file mode 100644 index 0000000000..025d725c98 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request_test.go @@ -0,0 +1,31 @@ +package sarama + +import "testing" + +var ( + offsetFetchRequestNoGroupNoPartitions = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + offsetFetchRequestNoPartitions = []byte{ + 0x00, 0x04, 'b', 'l', 'a', 'h', + 0x00, 0x00, 0x00, 0x00} + + offsetFetchRequestOnePartition = []byte{ + 0x00, 0x04, 'b', 'l', 'a', 'h', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x0D, 't', 'o', 'p', 'i', 'c', 'T', 'h', 'e', 'F', 'i', 'r', 's', 't', + 0x00, 0x00, 0x00, 0x01, + 0x4F, 0x4F, 0x4F, 0x4F} +) + +func TestOffsetFetchRequest(t *testing.T) { + request := new(OffsetFetchRequest) + testRequest(t, "no group, no partitions", request, offsetFetchRequestNoGroupNoPartitions) + + request.ConsumerGroup = "blah" + testRequest(t, "no partitions", request, offsetFetchRequestNoPartitions) + + request.AddPartition("topicTheFirst", 0x4F4F4F4F) + testRequest(t, "one partition", request, offsetFetchRequestOnePartition) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go b/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go new file mode 100644 index 0000000000..7614ae4249 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response_test.go @@ -0,0 +1,22 @@ +package sarama + +import "testing" + +var ( + emptyOffsetFetchResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} +) + +func TestEmptyOffsetFetchResponse(t *testing.T) { + response := OffsetFetchResponse{} + testResponse(t, "empty", &response, emptyOffsetFetchResponse) +} + +func TestNormalOffsetFetchResponse(t *testing.T) { + response := OffsetFetchResponse{} + response.AddBlock("t", 0, &OffsetFetchResponseBlock{0, "md", ErrRequestTimedOut}) + response.Blocks["m"] = nil + // The response encoded form cannot be checked for it varies due to + // unpredictable map traversal order. 
+ testResponse(t, "normal", &response, nil) +} diff --git a/vendor/github.com/Shopify/sarama/offset_manager_test.go b/vendor/github.com/Shopify/sarama/offset_manager_test.go new file mode 100644 index 0000000000..c111a5a634 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_manager_test.go @@ -0,0 +1,369 @@ +package sarama + +import ( + "testing" + "time" +) + +func initOffsetManager(t *testing.T) (om OffsetManager, + testClient Client, broker, coordinator *MockBroker) { + + config := NewConfig() + config.Metadata.Retry.Max = 1 + config.Consumer.Offsets.CommitInterval = 1 * time.Millisecond + config.Version = V0_9_0_0 + + broker = NewMockBroker(t, 1) + coordinator = NewMockBroker(t, 2) + + seedMeta := new(MetadataResponse) + seedMeta.AddBroker(coordinator.Addr(), coordinator.BrokerID()) + seedMeta.AddTopicPartition("my_topic", 0, 1, []int32{}, []int32{}, ErrNoError) + seedMeta.AddTopicPartition("my_topic", 1, 1, []int32{}, []int32{}, ErrNoError) + broker.Returns(seedMeta) + + var err error + testClient, err = NewClient([]string{broker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: coordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: coordinator.Port(), + }) + + om, err = NewOffsetManagerFromClient("group", testClient) + if err != nil { + t.Fatal(err) + } + + return om, testClient, broker, coordinator +} + +func initPartitionOffsetManager(t *testing.T, om OffsetManager, + coordinator *MockBroker, initialOffset int64, metadata string) PartitionOffsetManager { + + fetchResponse := new(OffsetFetchResponse) + fetchResponse.AddBlock("my_topic", 0, &OffsetFetchResponseBlock{ + Err: ErrNoError, + Offset: initialOffset, + Metadata: metadata, + }) + coordinator.Returns(fetchResponse) + + pom, err := om.ManagePartition("my_topic", 0) + if err != nil { + t.Fatal(err) + } + + return pom +} + +func TestNewOffsetManager(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + seedBroker.Returns(new(MetadataResponse)) + + testClient, err := NewClient([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + _, err = NewOffsetManagerFromClient("group", testClient) + if err != nil { + t.Error(err) + } + + safeClose(t, testClient) + + _, err = NewOffsetManagerFromClient("group", testClient) + if err != ErrClosedClient { + t.Errorf("Error expected for closed client; actual value: %v", err) + } + + seedBroker.Close() +} + +// Test recovery from ErrNotCoordinatorForConsumer +// on first fetchInitialOffset call +func TestOffsetManagerFetchInitialFail(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + + // Error on first fetchInitialOffset call + responseBlock := OffsetFetchResponseBlock{ + Err: ErrNotCoordinatorForConsumer, + Offset: 5, + Metadata: "test_meta", + } + + fetchResponse := new(OffsetFetchResponse) + fetchResponse.AddBlock("my_topic", 0, &responseBlock) + coordinator.Returns(fetchResponse) + + // Refresh coordinator + newCoordinator := NewMockBroker(t, 3) + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: newCoordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: newCoordinator.Port(), + }) + + // Second fetchInitialOffset call is fine + fetchResponse2 := new(OffsetFetchResponse) + responseBlock2 := responseBlock + responseBlock2.Err = ErrNoError + fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) + newCoordinator.Returns(fetchResponse2) + + pom, err := om.ManagePartition("my_topic", 0) + if err != nil { + t.Error(err) + } + 
+ broker.Close() + coordinator.Close() + newCoordinator.Close() + safeClose(t, pom) + safeClose(t, om) + safeClose(t, testClient) +} + +// Test fetchInitialOffset retry on ErrOffsetsLoadInProgress +func TestOffsetManagerFetchInitialLoadInProgress(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + + // Error on first fetchInitialOffset call + responseBlock := OffsetFetchResponseBlock{ + Err: ErrOffsetsLoadInProgress, + Offset: 5, + Metadata: "test_meta", + } + + fetchResponse := new(OffsetFetchResponse) + fetchResponse.AddBlock("my_topic", 0, &responseBlock) + coordinator.Returns(fetchResponse) + + // Second fetchInitialOffset call is fine + fetchResponse2 := new(OffsetFetchResponse) + responseBlock2 := responseBlock + responseBlock2.Err = ErrNoError + fetchResponse2.AddBlock("my_topic", 0, &responseBlock2) + coordinator.Returns(fetchResponse2) + + pom, err := om.ManagePartition("my_topic", 0) + if err != nil { + t.Error(err) + } + + broker.Close() + coordinator.Close() + safeClose(t, pom) + safeClose(t, om) + safeClose(t, testClient) +} + +func TestPartitionOffsetManagerInitialOffset(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + testClient.Config().Consumer.Offsets.Initial = OffsetOldest + + // Kafka returns -1 if no offset has been stored for this partition yet. + pom := initPartitionOffsetManager(t, om, coordinator, -1, "") + + offset, meta := pom.NextOffset() + if offset != OffsetOldest { + t.Errorf("Expected offset OffsetOldest. Actual: %v", offset) + } + if meta != "" { + t.Errorf("Expected metadata to be empty. Actual: %q", meta) + } + + safeClose(t, pom) + safeClose(t, om) + broker.Close() + coordinator.Close() + safeClose(t, testClient) +} + +func TestPartitionOffsetManagerNextOffset(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + pom := initPartitionOffsetManager(t, om, coordinator, 5, "test_meta") + + offset, meta := pom.NextOffset() + if offset != 5 { + t.Errorf("Expected offset 5. Actual: %v", offset) + } + if meta != "test_meta" { + t.Errorf("Expected metadata \"test_meta\". Actual: %q", meta) + } + + safeClose(t, pom) + safeClose(t, om) + broker.Close() + coordinator.Close() + safeClose(t, testClient) +} + +func TestPartitionOffsetManagerMarkOffset(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") + + ocResponse := new(OffsetCommitResponse) + ocResponse.AddError("my_topic", 0, ErrNoError) + coordinator.Returns(ocResponse) + + pom.MarkOffset(100, "modified_meta") + offset, meta := pom.NextOffset() + + if offset != 100 { + t.Errorf("Expected offset 100. Actual: %v", offset) + } + if meta != "modified_meta" { + t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) + } + + safeClose(t, pom) + safeClose(t, om) + safeClose(t, testClient) + broker.Close() + coordinator.Close() +} + +func TestPartitionOffsetManagerMarkOffsetWithRetention(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + testClient.Config().Consumer.Offsets.Retention = time.Hour + + pom := initPartitionOffsetManager(t, om, coordinator, 5, "original_meta") + + ocResponse := new(OffsetCommitResponse) + ocResponse.AddError("my_topic", 0, ErrNoError) + handler := func(req *request) (res encoder) { + if req.body.version() != 2 { + t.Errorf("Expected to be using version 2.
Actual: %v", req.body.version()) + } + offsetCommitRequest := req.body.(*OffsetCommitRequest) + if offsetCommitRequest.RetentionTime != (60 * 60 * 1000) { + t.Errorf("Expected an hour retention time. Actual: %v", offsetCommitRequest.RetentionTime) + } + return ocResponse + } + coordinator.setHandler(handler) + + pom.MarkOffset(100, "modified_meta") + offset, meta := pom.NextOffset() + + if offset != 100 { + t.Errorf("Expected offset 100. Actual: %v", offset) + } + if meta != "modified_meta" { + t.Errorf("Expected metadata \"modified_meta\". Actual: %q", meta) + } + + safeClose(t, pom) + safeClose(t, om) + safeClose(t, testClient) + broker.Close() + coordinator.Close() +} + +func TestPartitionOffsetManagerCommitErr(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") + + // Error on one partition + ocResponse := new(OffsetCommitResponse) + ocResponse.AddError("my_topic", 0, ErrOffsetOutOfRange) + ocResponse.AddError("my_topic", 1, ErrNoError) + coordinator.Returns(ocResponse) + + newCoordinator := NewMockBroker(t, 3) + + // For RefreshCoordinator() + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: newCoordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: newCoordinator.Port(), + }) + + // Nothing in response.Errors at all + ocResponse2 := new(OffsetCommitResponse) + newCoordinator.Returns(ocResponse2) + + // For RefreshCoordinator() + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: newCoordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: newCoordinator.Port(), + }) + + // Error on the wrong partition for this pom + ocResponse3 := new(OffsetCommitResponse) + ocResponse3.AddError("my_topic", 1, ErrNoError) + newCoordinator.Returns(ocResponse3) + + // For RefreshCoordinator() + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: newCoordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: newCoordinator.Port(), + }) + + // ErrUnknownTopicOrPartition/ErrNotLeaderForPartition/ErrLeaderNotAvailable block + ocResponse4 := new(OffsetCommitResponse) + ocResponse4.AddError("my_topic", 0, ErrUnknownTopicOrPartition) + newCoordinator.Returns(ocResponse4) + + // For RefreshCoordinator() + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: newCoordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: newCoordinator.Port(), + }) + + // Normal error response + ocResponse5 := new(OffsetCommitResponse) + ocResponse5.AddError("my_topic", 0, ErrNoError) + newCoordinator.Returns(ocResponse5) + + pom.MarkOffset(100, "modified_meta") + + err := pom.Close() + if err != nil { + t.Error(err) + } + + broker.Close() + coordinator.Close() + newCoordinator.Close() + safeClose(t, om) + safeClose(t, testClient) +} + +// Test of recovery from abort +func TestAbortPartitionOffsetManager(t *testing.T) { + om, testClient, broker, coordinator := initOffsetManager(t) + pom := initPartitionOffsetManager(t, om, coordinator, 5, "meta") + + // this triggers an error in the CommitOffset request, + // which leads to the abort call + coordinator.Close() + + // Response to refresh coordinator request + newCoordinator := NewMockBroker(t, 3) + broker.Returns(&ConsumerMetadataResponse{ + CoordinatorID: newCoordinator.BrokerID(), + CoordinatorHost: "127.0.0.1", + CoordinatorPort: newCoordinator.Port(), + }) + + ocResponse := new(OffsetCommitResponse) + ocResponse.AddError("my_topic", 0, ErrNoError) + newCoordinator.Returns(ocResponse) + + 
pom.MarkOffset(100, "modified_meta") + + safeClose(t, pom) + safeClose(t, om) + broker.Close() + safeClose(t, testClient) +} diff --git a/vendor/github.com/Shopify/sarama/offset_request_test.go b/vendor/github.com/Shopify/sarama/offset_request_test.go new file mode 100644 index 0000000000..f3b3046bbb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_request_test.go @@ -0,0 +1,26 @@ +package sarama + +import "testing" + +var ( + offsetRequestNoBlocks = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x00} + + offsetRequestOneBlock = []byte{ + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02} +) + +func TestOffsetRequest(t *testing.T) { + request := new(OffsetRequest) + testRequest(t, "no blocks", request, offsetRequestNoBlocks) + + request.AddBlock("foo", 4, 1, 2) + testRequest(t, "one block", request, offsetRequestOneBlock) +} diff --git a/vendor/github.com/Shopify/sarama/offset_response_test.go b/vendor/github.com/Shopify/sarama/offset_response_test.go new file mode 100644 index 0000000000..fc00f4b60c --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_response_test.go @@ -0,0 +1,62 @@ +package sarama + +import "testing" + +var ( + emptyOffsetResponse = []byte{ + 0x00, 0x00, 0x00, 0x00} + + normalOffsetResponse = []byte{ + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x01, 'a', + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x01, 'z', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06} +) + +func TestEmptyOffsetResponse(t *testing.T) { + response := OffsetResponse{} + + testVersionDecodable(t, "empty", &response, emptyOffsetResponse, 0) + if len(response.Blocks) != 0 { + t.Error("Decoding produced", len(response.Blocks), "topics where there were none.") + } +} + +func TestNormalOffsetResponse(t *testing.T) { + response := OffsetResponse{} + + testVersionDecodable(t, "normal", &response, normalOffsetResponse, 0) + + if len(response.Blocks) != 2 { + t.Fatal("Decoding produced", len(response.Blocks), "topics where there were two.") + } + + if len(response.Blocks["a"]) != 0 { + t.Fatal("Decoding produced", len(response.Blocks["a"]), "partitions for topic 'a' where there were none.") + } + + if len(response.Blocks["z"]) != 1 { + t.Fatal("Decoding produced", len(response.Blocks["z"]), "partitions for topic 'z' where there was one.") + } + + if response.Blocks["z"][2].Err != ErrNoError { + t.Fatal("Decoding produced invalid error for topic z partition 2.") + } + + if len(response.Blocks["z"][2].Offsets) != 2 { + t.Fatal("Decoding produced invalid number of offsets for topic z partition 2.") + } + + if response.Blocks["z"][2].Offsets[0] != 5 || response.Blocks["z"][2].Offsets[1] != 6 { + t.Fatal("Decoding produced invalid offsets for topic z partition 2.") + } + +} diff --git a/vendor/github.com/Shopify/sarama/partitioner_test.go b/vendor/github.com/Shopify/sarama/partitioner_test.go new file mode 100644 index 0000000000..3d391c59c6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/partitioner_test.go @@ -0,0 +1,215 @@ +package sarama + +import ( + "crypto/rand" + "log" + "testing" +) + +func assertPartitioningConsistent(t *testing.T, partitioner Partitioner, message *ProducerMessage, numPartitions int32) { + choice, err := partitioner.Partition(message, numPartitions) + if err != nil { + 
t.Error(partitioner, err) + } + if choice < 0 || choice >= numPartitions { + t.Error(partitioner, "returned partition", choice, "outside of range for", message) + } + for i := 1; i < 50; i++ { + newChoice, err := partitioner.Partition(message, numPartitions) + if err != nil { + t.Error(partitioner, err) + } + if newChoice != choice { + t.Error(partitioner, "returned partition", newChoice, "inconsistent with", choice, ".") + } + } +} + +func TestRandomPartitioner(t *testing.T) { + partitioner := NewRandomPartitioner("mytopic") + + choice, err := partitioner.Partition(nil, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + for i := 1; i < 50; i++ { + choice, err := partitioner.Partition(nil, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice < 0 || choice >= 50 { + t.Error("Returned partition", choice, "outside of range.") + } + } +} + +func TestRoundRobinPartitioner(t *testing.T) { + partitioner := NewRoundRobinPartitioner("mytopic") + + choice, err := partitioner.Partition(nil, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + var i int32 + for i = 1; i < 50; i++ { + choice, err := partitioner.Partition(nil, 7) + if err != nil { + t.Error(partitioner, err) + } + if choice != i%7 { + t.Error("Returned partition", choice, "expecting", i%7) + } + } +} + +func TestHashPartitioner(t *testing.T) { + partitioner := NewHashPartitioner("mytopic") + + choice, err := partitioner.Partition(&ProducerMessage{}, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + for i := 1; i < 50; i++ { + choice, err := partitioner.Partition(&ProducerMessage{}, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice < 0 || choice >= 50 { + t.Error("Returned partition", choice, "outside of range for nil key.") + } + } + + buf := make([]byte, 256) + for i := 1; i < 50; i++ { + if _, err := rand.Read(buf); err != nil { + t.Error(err) + } + assertPartitioningConsistent(t, partitioner, &ProducerMessage{Key: ByteEncoder(buf)}, 50) + } +} + +func TestHashPartitionerMinInt32(t *testing.T) { + partitioner := NewHashPartitioner("mytopic") + + msg := ProducerMessage{} + // "1468509572224" generates 2147483648 (uint32) result from Sum32 function + // which is -2147483648 or int32's min value + msg.Key = StringEncoder("1468509572224") + + choice, err := partitioner.Partition(&msg, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice < 0 || choice >= 50 { + t.Error("Returned partition", choice, "outside of range for nil key.") + } +} + +func TestManualPartitioner(t *testing.T) { + partitioner := NewManualPartitioner("mytopic") + + choice, err := partitioner.Partition(&ProducerMessage{}, 1) + if err != nil { + t.Error(partitioner, err) + } + if choice != 0 { + t.Error("Returned non-zero partition when only one available.") + } + + for i := int32(1); i < 50; i++ { + choice, err := partitioner.Partition(&ProducerMessage{Partition: i}, 50) + if err != nil { + t.Error(partitioner, err) + } + if choice != i { + t.Error("Returned partition not the same as the input partition") + } + } +} + +// By default, Sarama uses the message's key to consistently assign a partition to +// a message using hashing. If no key is set, a random partition will be chosen. 
+// This example shows how you can partition messages randomly, even when a key is set, +// by overriding Config.Producer.Partitioner. +func ExamplePartitioner_random() { + config := NewConfig() + config.Producer.Partitioner = NewRandomPartitioner + + producer, err := NewSyncProducer([]string{"localhost:9092"}, config) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := producer.Close(); err != nil { + log.Println("Failed to close producer:", err) + } + }() + + msg := &ProducerMessage{Topic: "test", Key: StringEncoder("key is set"), Value: StringEncoder("test")} + partition, offset, err := producer.SendMessage(msg) + if err != nil { + log.Fatalln("Failed to produce message to kafka cluster.") + } + + log.Printf("Produced message to partition %d with offset %d", partition, offset) +} + +// This example shows how to assign partitions to your messages manually. +func ExamplePartitioner_manual() { + config := NewConfig() + + // First, we tell the producer that we are going to partition ourselves. + config.Producer.Partitioner = NewManualPartitioner + + producer, err := NewSyncProducer([]string{"localhost:9092"}, config) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := producer.Close(); err != nil { + log.Println("Failed to close producer:", err) + } + }() + + // Now, we set the Partition field of the ProducerMessage struct. + msg := &ProducerMessage{Topic: "test", Partition: 6, Value: StringEncoder("test")} + + partition, offset, err := producer.SendMessage(msg) + if err != nil { + log.Fatalln("Failed to produce message to kafka cluster.") + } + + if partition != 6 { + log.Fatal("Message should have been produced to partition 6!") + } + + log.Printf("Produced message to partition %d with offset %d", partition, offset) +} + +// This example shows how to set a different partitioner depending on the topic. +func ExamplePartitioner_per_topic() { + config := NewConfig() + config.Producer.Partitioner = func(topic string) Partitioner { + switch topic { + case "access_log", "error_log": + return NewRandomPartitioner(topic) + + default: + return NewHashPartitioner(topic) + } + } + + // ... 
+} diff --git a/vendor/github.com/Shopify/sarama/produce_request_test.go b/vendor/github.com/Shopify/sarama/produce_request_test.go new file mode 100644 index 0000000000..21f4ba5b14 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_request_test.go @@ -0,0 +1,47 @@ +package sarama + +import ( + "testing" +) + +var ( + produceRequestEmpty = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00} + + produceRequestHeader = []byte{ + 0x01, 0x23, + 0x00, 0x00, 0x04, 0x44, + 0x00, 0x00, 0x00, 0x00} + + produceRequestOneMessage = []byte{ + 0x01, 0x23, + 0x00, 0x00, 0x04, 0x44, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x05, 't', 'o', 'p', 'i', 'c', + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0xAD, + 0x00, 0x00, 0x00, 0x1C, + // messageSet + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x10, + // message + 0x23, 0x96, 0x4a, 0xf7, // CRC + 0x00, + 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, + 0x00, 0x00, 0x00, 0x02, 0x00, 0xEE} +) + +func TestProduceRequest(t *testing.T) { + request := new(ProduceRequest) + testRequest(t, "empty", request, produceRequestEmpty) + + request.RequiredAcks = 0x123 + request.Timeout = 0x444 + testRequest(t, "header", request, produceRequestHeader) + + request.AddMessage("topic", 0xAD, &Message{Codec: CompressionNone, Key: nil, Value: []byte{0x00, 0xEE}}) + testRequest(t, "one message", request, produceRequestOneMessage) +} diff --git a/vendor/github.com/Shopify/sarama/produce_response_test.go b/vendor/github.com/Shopify/sarama/produce_response_test.go new file mode 100644 index 0000000000..f71709fe80 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_response_test.go @@ -0,0 +1,67 @@ +package sarama + +import "testing" + +var ( + produceResponseNoBlocks = []byte{ + 0x00, 0x00, 0x00, 0x00} + + produceResponseManyBlocks = []byte{ + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x03, 'f', 'o', 'o', + 0x00, 0x00, 0x00, 0x00, + + 0x00, 0x03, 'b', 'a', 'r', + 0x00, 0x00, 0x00, 0x02, + + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, + + 0x00, 0x00, 0x00, 0x02, + 0x00, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} +) + +func TestProduceResponse(t *testing.T) { + response := ProduceResponse{} + + testVersionDecodable(t, "no blocks", &response, produceResponseNoBlocks, 0) + if len(response.Blocks) != 0 { + t.Error("Decoding produced", len(response.Blocks), "topics where there were none") + } + + testVersionDecodable(t, "many blocks", &response, produceResponseManyBlocks, 0) + if len(response.Blocks) != 2 { + t.Error("Decoding produced", len(response.Blocks), "topics where there were 2") + } + if len(response.Blocks["foo"]) != 0 { + t.Error("Decoding produced", len(response.Blocks["foo"]), "partitions for 'foo' where there were none") + } + if len(response.Blocks["bar"]) != 2 { + t.Error("Decoding produced", len(response.Blocks["bar"]), "partitions for 'bar' where there were two") + } + block := response.GetBlock("bar", 1) + if block == nil { + t.Error("Decoding did not produce a block for bar/1") + } else { + if block.Err != ErrNoError { + t.Error("Decoding failed for bar/1/Err, got:", int16(block.Err)) + } + if block.Offset != 0xFF { + t.Error("Decoding failed for bar/1/Offset, got:", block.Offset) + } + } + block = response.GetBlock("bar", 2) + if block == nil { + t.Error("Decoding did not produce a block for bar/2") + } else { + if block.Err != ErrInvalidMessage { + t.Error("Decoding failed for bar/2/Err, got:", int16(block.Err)) + } + if block.Offset != 0 { + t.Error("Decoding failed for 
bar/2/Offset, got:", block.Offset) + } + } +} diff --git a/vendor/github.com/Shopify/sarama/produce_set_test.go b/vendor/github.com/Shopify/sarama/produce_set_test.go new file mode 100644 index 0000000000..da62da9148 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_set_test.go @@ -0,0 +1,143 @@ +package sarama + +import ( + "testing" + "time" +) + +func makeProduceSet() (*asyncProducer, *produceSet) { + parent := &asyncProducer{ + conf: NewConfig(), + } + return parent, newProduceSet(parent) +} + +func safeAddMessage(t *testing.T, ps *produceSet, msg *ProducerMessage) { + if err := ps.add(msg); err != nil { + t.Error(err) + } +} + +func TestProduceSetInitial(t *testing.T) { + _, ps := makeProduceSet() + + if !ps.empty() { + t.Error("New produceSet should be empty") + } + + if ps.readyToFlush() { + t.Error("Empty produceSet must never be ready to flush") + } +} + +func TestProduceSetAddingMessages(t *testing.T) { + parent, ps := makeProduceSet() + parent.conf.Producer.Flush.MaxMessages = 1000 + + msg := &ProducerMessage{Key: StringEncoder(TestMessage), Value: StringEncoder(TestMessage)} + safeAddMessage(t, ps, msg) + + if ps.empty() { + t.Error("set shouldn't be empty when a message is added") + } + + if !ps.readyToFlush() { + t.Error("by default set should be ready to flush when any message is in place") + } + + for i := 0; i < 999; i++ { + if ps.wouldOverflow(msg) { + t.Error("set shouldn't fill up after only", i+1, "messages") + } + safeAddMessage(t, ps, msg) + } + + if !ps.wouldOverflow(msg) { + t.Error("set should be full after 1000 messages") + } +} + +func TestProduceSetPartitionTracking(t *testing.T) { + _, ps := makeProduceSet() + + m1 := &ProducerMessage{Topic: "t1", Partition: 0} + m2 := &ProducerMessage{Topic: "t1", Partition: 1} + m3 := &ProducerMessage{Topic: "t2", Partition: 0} + safeAddMessage(t, ps, m1) + safeAddMessage(t, ps, m2) + safeAddMessage(t, ps, m3) + + seenT1P0 := false + seenT1P1 := false + seenT2P0 := false + + ps.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + if len(msgs) != 1 { + t.Error("Wrong message count") + } + + if topic == "t1" && partition == 0 { + seenT1P0 = true + } else if topic == "t1" && partition == 1 { + seenT1P1 = true + } else if topic == "t2" && partition == 0 { + seenT2P0 = true + } + }) + + if !seenT1P0 { + t.Error("Didn't see t1p0") + } + if !seenT1P1 { + t.Error("Didn't see t1p1") + } + if !seenT2P0 { + t.Error("Didn't see t2p0") + } + + if len(ps.dropPartition("t1", 1)) != 1 { + t.Error("Got wrong messages back from dropping partition") + } + + if ps.bufferCount != 2 { + t.Error("Incorrect buffer count after dropping partition") + } +} + +func TestProduceSetRequestBuilding(t *testing.T) { + parent, ps := makeProduceSet() + parent.conf.Producer.RequiredAcks = WaitForAll + parent.conf.Producer.Timeout = 10 * time.Second + + msg := &ProducerMessage{ + Topic: "t1", + Partition: 0, + Key: StringEncoder(TestMessage), + Value: StringEncoder(TestMessage), + } + for i := 0; i < 10; i++ { + safeAddMessage(t, ps, msg) + } + msg.Partition = 1 + for i := 0; i < 10; i++ { + safeAddMessage(t, ps, msg) + } + msg.Topic = "t2" + for i := 0; i < 10; i++ { + safeAddMessage(t, ps, msg) + } + + req := ps.buildRequest() + + if req.RequiredAcks != WaitForAll { + t.Error("RequiredAcks not set properly") + } + + if req.Timeout != 10000 { + t.Error("Timeout not set properly") + } + + if len(req.msgSets) != 2 { + t.Error("Wrong number of topics in request") + } +} diff --git 
a/vendor/github.com/Shopify/sarama/request_test.go b/vendor/github.com/Shopify/sarama/request_test.go new file mode 100644 index 0000000000..e431e23d17 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/request_test.go @@ -0,0 +1,87 @@ +package sarama + +import ( + "bytes" + "reflect" + "testing" +) + +type testRequestBody struct { +} + +func (s *testRequestBody) key() int16 { + return 0x666 +} + +func (s *testRequestBody) version() int16 { + return 0xD2 +} + +func (s *testRequestBody) encode(pe packetEncoder) error { + return pe.putString("abc") +} + +// not specific to request tests, just helper functions for testing structures that +// implement the encoder or decoder interfaces that needed somewhere to live + +func testEncodable(t *testing.T, name string, in encoder, expect []byte) { + packet, err := encode(in) + if err != nil { + t.Error(err) + } else if !bytes.Equal(packet, expect) { + t.Error("Encoding", name, "failed\ngot ", packet, "\nwant", expect) + } +} + +func testDecodable(t *testing.T, name string, out decoder, in []byte) { + err := decode(in, out) + if err != nil { + t.Error("Decoding", name, "failed:", err) + } +} + +func testVersionDecodable(t *testing.T, name string, out versionedDecoder, in []byte, version int16) { + err := versionedDecode(in, out, version) + if err != nil { + t.Error("Decoding", name, "version", version, "failed:", err) + } +} + +func testRequest(t *testing.T, name string, rb protocolBody, expected []byte) { + // Encoder request + req := &request{correlationID: 123, clientID: "foo", body: rb} + packet, err := encode(req) + headerSize := 14 + len("foo") + if err != nil { + t.Error(err) + } else if !bytes.Equal(packet[headerSize:], expected) { + t.Error("Encoding", name, "failed\ngot ", packet[headerSize:], "\nwant", expected) + } + // Decoder request + decoded, err := decodeRequest(bytes.NewReader(packet)) + if err != nil { + t.Error("Failed to decode request", err) + } else if decoded.correlationID != 123 || decoded.clientID != "foo" { + t.Errorf("Decoded header is not valid: %v", decoded) + } else if !reflect.DeepEqual(rb, decoded.body) { + t.Errorf("Decoded request does not match the encoded one\nencoded: %v\ndecoded: %v", rb, decoded.body) + } +} + +func testResponse(t *testing.T, name string, res protocolBody, expected []byte) { + encoded, err := encode(res) + if err != nil { + t.Error(err) + } else if expected != nil && !bytes.Equal(encoded, expected) { + t.Error("Encoding", name, "failed\ngot ", encoded, "\nwant", expected) + } + + decoded := reflect.New(reflect.TypeOf(res).Elem()).Interface().(versionedDecoder) + if err := versionedDecode(encoded, decoded, res.version()); err != nil { + t.Error("Decoding", name, "failed:", err) + } + + if !reflect.DeepEqual(decoded, res) { + t.Errorf("Decoded response does not match the encoded one\nencoded: %#v\ndecoded: %#v", res, decoded) + } +} diff --git a/vendor/github.com/Shopify/sarama/response_header_test.go b/vendor/github.com/Shopify/sarama/response_header_test.go new file mode 100644 index 0000000000..8f9fdb80c5 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/response_header_test.go @@ -0,0 +1,21 @@ +package sarama + +import "testing" + +var ( + responseHeaderBytes = []byte{ + 0x00, 0x00, 0x0f, 0x00, + 0x0a, 0xbb, 0xcc, 0xff} +) + +func TestResponseHeader(t *testing.T) { + header := responseHeader{} + + testDecodable(t, "response header", &header, responseHeaderBytes) + if header.length != 0xf00 { + t.Error("Decoding header length failed, got", header.length) + } + if header.correlationID != 
0x0abbccff { + t.Error("Decoding header correlation id failed, got", header.correlationID) + } +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go new file mode 100644 index 0000000000..806e628fd9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request_test.go @@ -0,0 +1,17 @@ +package sarama + +import "testing" + +var ( + baseSaslRequest = []byte{ + 0, 3, 'f', 'o', 'o', // Mechanism + } +) + +func TestSaslHandshakeRequest(t *testing.T) { + var request *SaslHandshakeRequest + + request = new(SaslHandshakeRequest) + request.Mechanism = "foo" + testRequest(t, "basic", request, baseSaslRequest) +} diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go new file mode 100644 index 0000000000..1fd4c79e08 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response_test.go @@ -0,0 +1,24 @@ +package sarama + +import "testing" + +var ( + saslHandshakeResponse = []byte{ + 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, + 0x00, 0x03, 'f', 'o', 'o', + } +) + +func TestSaslHandshakeResponse(t *testing.T) { + var response *SaslHandshakeResponse + + response = new(SaslHandshakeResponse) + testVersionDecodable(t, "no error", response, saslHandshakeResponse, 0) + if response.Err != ErrNoError { + t.Error("Decoding error failed: no error expected but found", response.Err) + } + if response.EnabledMechanisms[0] != "foo" { + t.Error("Decoding error failed: expected 'foo' but found", response.EnabledMechanisms) + } +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_request_test.go b/vendor/github.com/Shopify/sarama/sync_group_request_test.go new file mode 100644 index 0000000000..3f537ef9fb --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_request_test.go @@ -0,0 +1,38 @@ +package sarama + +import "testing" + +var ( + emptySyncGroupRequest = []byte{ + 0, 3, 'f', 'o', 'o', // Group ID + 0x00, 0x01, 0x02, 0x03, // Generation ID + 0, 3, 'b', 'a', 'z', // Member ID + 0, 0, 0, 0, // no assignments + } + + populatedSyncGroupRequest = []byte{ + 0, 3, 'f', 'o', 'o', // Group ID + 0x00, 0x01, 0x02, 0x03, // Generation ID + 0, 3, 'b', 'a', 'z', // Member ID + 0, 0, 0, 1, // one assignment + 0, 3, 'b', 'a', 'z', // Member ID + 0, 0, 0, 3, 'f', 'o', 'o', // Member assignment + } +) + +func TestSyncGroupRequest(t *testing.T) { + var request *SyncGroupRequest + + request = new(SyncGroupRequest) + request.GroupId = "foo" + request.GenerationId = 66051 + request.MemberId = "baz" + testRequest(t, "empty", request, emptySyncGroupRequest) + + request = new(SyncGroupRequest) + request.GroupId = "foo" + request.GenerationId = 66051 + request.MemberId = "baz" + request.AddGroupAssignment("baz", []byte("foo")) + testRequest(t, "populated", request, populatedSyncGroupRequest) +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response_test.go b/vendor/github.com/Shopify/sarama/sync_group_response_test.go new file mode 100644 index 0000000000..6fb708858a --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_response_test.go @@ -0,0 +1,40 @@ +package sarama + +import ( + "reflect" + "testing" +) + +var ( + syncGroupResponseNoError = []byte{ + 0x00, 0x00, // No error + 0, 0, 0, 3, 0x01, 0x02, 0x03, // Member assignment data + } + + syncGroupResponseWithError = []byte{ + 0, 27, // ErrRebalanceInProgress + 0, 0, 0, 0, // No member assignment data + } +) + +func TestSyncGroupResponse(t 
*testing.T) { + var response *SyncGroupResponse + + response = new(SyncGroupResponse) + testVersionDecodable(t, "no error", response, syncGroupResponseNoError, 0) + if response.Err != ErrNoError { + t.Error("Decoding Err failed: no error expected but found", response.Err) + } + if !reflect.DeepEqual(response.MemberAssignment, []byte{0x01, 0x02, 0x03}) { + t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment) + } + + response = new(SyncGroupResponse) + testVersionDecodable(t, "with error", response, syncGroupResponseWithError, 0) + if response.Err != ErrRebalanceInProgress { + t.Error("Decoding Err failed: ErrRebalanceInProgress expected but found", response.Err) + } + if !reflect.DeepEqual(response.MemberAssignment, []byte{}) { + t.Error("Decoding MemberAssignment failed, found:", response.MemberAssignment) + } +} diff --git a/vendor/github.com/Shopify/sarama/sync_producer_test.go b/vendor/github.com/Shopify/sarama/sync_producer_test.go new file mode 100644 index 0000000000..12ed20e1fc --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_producer_test.go @@ -0,0 +1,196 @@ +package sarama + +import ( + "log" + "sync" + "testing" +) + +func TestSyncProducer(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + for i := 0; i < 10; i++ { + leader.Returns(prodSuccess) + } + + producer, err := NewSyncProducer([]string{seedBroker.Addr()}, nil) + if err != nil { + t.Fatal(err) + } + + for i := 0; i < 10; i++ { + msg := &ProducerMessage{ + Topic: "my_topic", + Value: StringEncoder(TestMessage), + Metadata: "test", + } + + partition, offset, err := producer.SendMessage(msg) + + if partition != 0 || msg.Partition != partition { + t.Error("Unexpected partition") + } + if offset != 0 || msg.Offset != offset { + t.Error("Unexpected offset") + } + if str, ok := msg.Metadata.(string); !ok || str != "test" { + t.Error("Unexpected metadata") + } + if err != nil { + t.Error(err) + } + } + + safeClose(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestSyncProducerBatch(t *testing.T) { + seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 3 + producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + err = producer.SendMessages([]*ProducerMessage{ + &ProducerMessage{ + Topic: "my_topic", + Value: StringEncoder(TestMessage), + Metadata: "test", + }, + &ProducerMessage{ + Topic: "my_topic", + Value: StringEncoder(TestMessage), + Metadata: "test", + }, + &ProducerMessage{ + Topic: "my_topic", + Value: StringEncoder(TestMessage), + Metadata: "test", + }, + }) + + if err != nil { + t.Error(err) + } + + safeClose(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestConcurrentSyncProducer(t *testing.T) { +
seedBroker := NewMockBroker(t, 1) + leader := NewMockBroker(t, 2) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(leader.Addr(), leader.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, leader.BrokerID(), nil, nil, ErrNoError) + seedBroker.Returns(metadataResponse) + + prodSuccess := new(ProduceResponse) + prodSuccess.AddTopicPartition("my_topic", 0, ErrNoError) + leader.Returns(prodSuccess) + + config := NewConfig() + config.Producer.Flush.Messages = 100 + producer, err := NewSyncProducer([]string{seedBroker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + wg := sync.WaitGroup{} + + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder(TestMessage)} + partition, _, err := producer.SendMessage(msg) + if partition != 0 { + t.Error("Unexpected partition") + } + if err != nil { + t.Error(err) + } + wg.Done() + }() + } + wg.Wait() + + safeClose(t, producer) + leader.Close() + seedBroker.Close() +} + +func TestSyncProducerToNonExistingTopic(t *testing.T) { + broker := NewMockBroker(t, 1) + + metadataResponse := new(MetadataResponse) + metadataResponse.AddBroker(broker.Addr(), broker.BrokerID()) + metadataResponse.AddTopicPartition("my_topic", 0, broker.BrokerID(), nil, nil, ErrNoError) + broker.Returns(metadataResponse) + + config := NewConfig() + config.Metadata.Retry.Max = 0 + config.Producer.Retry.Max = 0 + + producer, err := NewSyncProducer([]string{broker.Addr()}, config) + if err != nil { + t.Fatal(err) + } + + metadataResponse = new(MetadataResponse) + metadataResponse.AddTopic("unknown", ErrUnknownTopicOrPartition) + broker.Returns(metadataResponse) + + _, _, err = producer.SendMessage(&ProducerMessage{Topic: "unknown"}) + if err != ErrUnknownTopicOrPartition { + t.Error("Expected ErrUnknownTopicOrPartition, found:", err) + } + + safeClose(t, producer) + broker.Close() +} + +// This example shows the basic usage pattern of the SyncProducer. +func ExampleSyncProducer() { + producer, err := NewSyncProducer([]string{"localhost:9092"}, nil) + if err != nil { + log.Fatalln(err) + } + defer func() { + if err := producer.Close(); err != nil { + log.Fatalln(err) + } + }() + + msg := &ProducerMessage{Topic: "my_topic", Value: StringEncoder("testing 123")} + partition, offset, err := producer.SendMessage(msg) + if err != nil { + log.Printf("FAILED to send message: %s\n", err) + } else { + log.Printf("> message sent to partition %d at offset %d\n", partition, offset) + } +} diff --git a/vendor/github.com/Shopify/sarama/utils_test.go b/vendor/github.com/Shopify/sarama/utils_test.go new file mode 100644 index 0000000000..a9e09502c7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/utils_test.go @@ -0,0 +1,21 @@ +package sarama + +import "testing" + +func TestVersionCompare(t *testing.T) { + if V0_8_2_0.IsAtLeast(V0_8_2_1) { + t.Error("0.8.2.0 >= 0.8.2.1") + } + if !V0_8_2_1.IsAtLeast(V0_8_2_0) { + t.Error("! 0.8.2.1 >= 0.8.2.0") + } + if !V0_8_2_0.IsAtLeast(V0_8_2_0) { + t.Error("! 0.8.2.0 >= 0.8.2.0") + } + if !V0_9_0_0.IsAtLeast(V0_8_2_1) { + t.Error("!
0.9.0.0 >= 0.8.2.1") + } + if V0_8_2_1.IsAtLeast(V0_10_0_0) { + t.Error("0.8.2.1 >= 0.10.0.0") + } +} diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore new file mode 100644 index 0000000000..66be63a005 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.gitignore @@ -0,0 +1 @@ +logrus diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml new file mode 100644 index 0000000000..2d8c086617 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -0,0 +1,8 @@ +language: go +go: + - 1.2 + - 1.3 + - 1.4 + - tip +install: + - go get -t ./... diff --git a/vendor/github.com/Sirupsen/logrus/entry_test.go b/vendor/github.com/Sirupsen/logrus/entry_test.go new file mode 100644 index 0000000000..98717df490 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry_test.go @@ -0,0 +1,53 @@ +package logrus + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestEntryPanicln(t *testing.T) { + errBoom := fmt.Errorf("boom time") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicln("kaboom") +} + +func TestEntryPanicf(t *testing.T) { + errBoom := fmt.Errorf("boom again") + + defer func() { + p := recover() + assert.NotNil(t, p) + + switch pVal := p.(type) { + case *Entry: + assert.Equal(t, "kaboom true", pVal.Message) + assert.Equal(t, errBoom, pVal.Data["err"]) + default: + t.Fatalf("want type *Entry, got %T: %#v", pVal, pVal) + } + }() + + logger := New() + logger.Out = &bytes.Buffer{} + entry := NewEntry(logger) + entry.WithField("err", errBoom).Panicf("kaboom %v", true) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go new file mode 100644 index 0000000000..77989da629 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter_bench_test.go @@ -0,0 +1,88 @@ +package logrus + +import ( + "testing" + "time" +) + +// smallFields is a small size data set for benchmarking +var smallFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", +} + +// largeFields is a large size data set for benchmarking +var largeFields = Fields{ + "foo": "bar", + "baz": "qux", + "one": "two", + "three": "four", + "five": "six", + "seven": "eight", + "nine": "ten", + "eleven": "twelve", + "thirteen": "fourteen", + "fifteen": "sixteen", + "seventeen": "eighteen", + "nineteen": "twenty", + "a": "b", + "c": "d", + "e": "f", + "g": "h", + "i": "j", + "k": "l", + "m": "n", + "o": "p", + "q": "r", + "s": "t", + "u": "v", + "w": "x", + "y": "z", + "this": "will", + "make": "thirty", + "entries": "yeah", +} + +func BenchmarkSmallTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, smallFields) +} + +func BenchmarkLargeTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{DisableColors: true}, largeFields) +} + +func BenchmarkSmallColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, smallFields) +} + +func BenchmarkLargeColoredTextFormatter(b *testing.B) { + doBenchmark(b, &TextFormatter{ForceColors: true}, largeFields) +} + +func BenchmarkSmallJSONFormatter(b 
*testing.B) { + doBenchmark(b, &JSONFormatter{}, smallFields) +} + +func BenchmarkLargeJSONFormatter(b *testing.B) { + doBenchmark(b, &JSONFormatter{}, largeFields) +} + +func doBenchmark(b *testing.B, formatter Formatter, fields Fields) { + entry := &Entry{ + Time: time.Time{}, + Level: InfoLevel, + Message: "message", + Data: fields, + } + var d []byte + var err error + for i := 0; i < b.N; i++ { + d, err = formatter.Format(entry) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(d))) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hook_test.go b/vendor/github.com/Sirupsen/logrus/hook_test.go new file mode 100644 index 0000000000..13f34cb6f8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hook_test.go @@ -0,0 +1,122 @@ +package logrus + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +type TestHook struct { + Fired bool +} + +func (hook *TestHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *TestHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookFires(t *testing.T) { + hook := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + assert.Equal(t, hook.Fired, false) + + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} + +type ModifyHook struct { +} + +func (hook *ModifyHook) Fire(entry *Entry) error { + entry.Data["wow"] = "whale" + return nil +} + +func (hook *ModifyHook) Levels() []Level { + return []Level{ + DebugLevel, + InfoLevel, + WarnLevel, + ErrorLevel, + FatalLevel, + PanicLevel, + } +} + +func TestHookCanModifyEntry(t *testing.T) { + hook := new(ModifyHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + }) +} + +func TestCanFireMultipleHooks(t *testing.T) { + hook1 := new(ModifyHook) + hook2 := new(TestHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook1) + log.Hooks.Add(hook2) + + log.WithField("wow", "elephant").Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["wow"], "whale") + assert.Equal(t, hook2.Fired, true) + }) +} + +type ErrorHook struct { + Fired bool +} + +func (hook *ErrorHook) Fire(entry *Entry) error { + hook.Fired = true + return nil +} + +func (hook *ErrorHook) Levels() []Level { + return []Level{ + ErrorLevel, + } +} + +func TestErrorHookShouldntFireOnInfo(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, false) + }) +} + +func TestErrorHookShouldFireOnError(t *testing.T) { + hook := new(ErrorHook) + + LogAndAssertJSON(t, func(log *Logger) { + log.Hooks.Add(hook) + log.Error("test") + }, func(fields Fields) { + assert.Equal(t, hook.Fired, true) + }) +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter_test.go b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go new file mode 100644 index 0000000000..1d70873254 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter_test.go @@ -0,0 +1,120 @@ +package logrus + +import ( + "encoding/json" + "errors" + + "testing" +) + +func TestErrorNotLost(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("error", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := 
make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["error"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestErrorNotLostOnFieldNotNamedError(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("omg", errors.New("wild walrus"))) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["omg"] != "wild walrus" { + t.Fatal("Error field not set") + } +} + +func TestFieldClashWithTime(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("time", "right now!")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.time"] != "right now!" { + t.Fatal("fields.time not set to original time field") + } + + if entry["time"] != "0001-01-01T00:00:00Z" { + t.Fatal("time field not set to current time, was: ", entry["time"]) + } +} + +func TestFieldClashWithMsg(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("msg", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.msg"] != "something" { + t.Fatal("fields.msg not set to original msg field") + } +} + +func TestFieldClashWithLevel(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + entry := make(map[string]interface{}) + err = json.Unmarshal(b, &entry) + if err != nil { + t.Fatal("Unable to unmarshal formatted entry: ", err) + } + + if entry["fields.level"] != "something" { + t.Fatal("fields.level not set to original level field") + } +} + +func TestJSONEntryEndsWithNewline(t *testing.T) { + formatter := &JSONFormatter{} + + b, err := formatter.Format(WithField("level", "something")) + if err != nil { + t.Fatal("Unable to format entry: ", err) + } + + if b[len(b)-1] != '\n' { + t.Fatal("Expected JSON log entry to end with a newline") + } +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus_test.go b/vendor/github.com/Sirupsen/logrus/logrus_test.go new file mode 100644 index 0000000000..d85dba4dcb --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus_test.go @@ -0,0 +1,301 @@ +package logrus + +import ( + "bytes" + "encoding/json" + "strconv" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func LogAndAssertJSON(t *testing.T, log func(*Logger), assertions func(fields Fields)) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + log(logger) + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assertions(fields) +} + +func LogAndAssertText(t *testing.T, log func(*Logger), assertions func(fields map[string]string)) { + var buffer bytes.Buffer + + logger := New() + logger.Out = &buffer + logger.Formatter = &TextFormatter{ + DisableColors: true, + } + + log(logger) + + fields := make(map[string]string) + 
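+	// Minimal logfmt-style parse of the text formatter's output: split on spaces,
+	// keep only key=value tokens, and unquote any value the formatter quoted.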
for _, kv := range strings.Split(buffer.String(), " ") { + if !strings.Contains(kv, "=") { + continue + } + kvArr := strings.Split(kv, "=") + key := strings.TrimSpace(kvArr[0]) + val := kvArr[1] + if kvArr[1][0] == '"' { + var err error + val, err = strconv.Unquote(val) + assert.NoError(t, err) + } + fields[key] = val + } + assertions(fields) +} + +func TestPrint(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Print("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestInfo(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "info") + }) +} + +func TestWarn(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Warn("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["level"], "warning") + }) +} + +func TestInfolnShouldAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test test") + }) +} + +func TestInfolnShouldAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test 10") + }) +} + +func TestInfolnShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldAddSpacesBetweenTwoNonStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Infoln(10, 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "10 10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStringAndNonstring(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", 10) + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test10") + }) +} + +func TestInfoShouldNotAddSpacesBetweenStrings(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.Info("test", "test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "testtest") + }) +} + +func TestWithFieldsShouldAllowAssignments(t *testing.T) { + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + localLog := logger.WithFields(Fields{ + "key1": "value1", + }) + + localLog.WithField("key2", "value2").Info("test") + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + assert.Equal(t, "value2", fields["key2"]) + assert.Equal(t, "value1", fields["key1"]) + + buffer = bytes.Buffer{} + fields = Fields{} + localLog.Info("test") + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.Nil(t, err) + + _, ok := fields["key2"] + assert.Equal(t, false, ok) + assert.Equal(t, "value1", fields["key1"]) +} + +func TestUserSuppliedFieldDoesNotOverwriteDefaults(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + }) +} + +func TestUserSuppliedMsgFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("msg", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["msg"], "test") + assert.Equal(t, fields["fields.msg"], "hello") + }) +} + +func TestUserSuppliedTimeFieldHasPrefix(t 
*testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("time", "hello").Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["fields.time"], "hello") + }) +} + +func TestUserSuppliedLevelFieldHasPrefix(t *testing.T) { + LogAndAssertJSON(t, func(log *Logger) { + log.WithField("level", 1).Info("test") + }, func(fields Fields) { + assert.Equal(t, fields["level"], "info") + assert.Equal(t, fields["fields.level"], 1) + }) +} + +func TestDefaultFieldsAreNotPrefixed(t *testing.T) { + LogAndAssertText(t, func(log *Logger) { + ll := log.WithField("herp", "derp") + ll.Info("hello") + ll.Info("bye") + }, func(fields map[string]string) { + for _, fieldName := range []string{"fields.level", "fields.time", "fields.msg"} { + if _, ok := fields[fieldName]; ok { + t.Fatalf("should not have prefixed %q: %v", fieldName, fields) + } + } + }) +} + +func TestDoubleLoggingDoesntPrefixPreviousFields(t *testing.T) { + + var buffer bytes.Buffer + var fields Fields + + logger := New() + logger.Out = &buffer + logger.Formatter = new(JSONFormatter) + + llog := logger.WithField("context", "eating raw fish") + + llog.Info("looks delicious") + + err := json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded first message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "looks delicious") + assert.Equal(t, fields["context"], "eating raw fish") + + buffer.Reset() + + llog.Warn("omg it is!") + + err = json.Unmarshal(buffer.Bytes(), &fields) + assert.NoError(t, err, "should have decoded second message") + assert.Equal(t, len(fields), 4, "should only have msg/time/level/context fields") + assert.Equal(t, fields["msg"], "omg it is!") + assert.Equal(t, fields["context"], "eating raw fish") + assert.Nil(t, fields["fields.msg"], "should not have prefixed previous `msg` entry") + +} + +func TestConvertLevelToString(t *testing.T) { + assert.Equal(t, "debug", DebugLevel.String()) + assert.Equal(t, "info", InfoLevel.String()) + assert.Equal(t, "warning", WarnLevel.String()) + assert.Equal(t, "error", ErrorLevel.String()) + assert.Equal(t, "fatal", FatalLevel.String()) + assert.Equal(t, "panic", PanicLevel.String()) +} + +func TestParseLevel(t *testing.T) { + l, err := ParseLevel("panic") + assert.Nil(t, err) + assert.Equal(t, PanicLevel, l) + + l, err = ParseLevel("fatal") + assert.Nil(t, err) + assert.Equal(t, FatalLevel, l) + + l, err = ParseLevel("error") + assert.Nil(t, err) + assert.Equal(t, ErrorLevel, l) + + l, err = ParseLevel("warn") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("warning") + assert.Nil(t, err) + assert.Equal(t, WarnLevel, l) + + l, err = ParseLevel("info") + assert.Nil(t, err) + assert.Equal(t, InfoLevel, l) + + l, err = ParseLevel("debug") + assert.Nil(t, err) + assert.Equal(t, DebugLevel, l) + + l, err = ParseLevel("invalid") + assert.Equal(t, "not a valid logrus Level: \"invalid\"", err.Error()) +} + +func TestGetSetLevelRace(t *testing.T) { + wg := sync.WaitGroup{} + for i := 0; i < 100; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + if i%2 == 0 { + SetLevel(InfoLevel) + } else { + GetLevel() + } + }(i) + + } + wg.Wait() +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter_test.go b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go new file mode 100644 index 0000000000..e25a44f67b --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter_test.go @@ -0,0 +1,61 @@ +package logrus + +import ( + "bytes" + 
"errors" + "testing" + "time" +) + +func TestQuoting(t *testing.T) { + tf := &TextFormatter{DisableColors: true} + + checkQuoting := func(q bool, value interface{}) { + b, _ := tf.Format(WithField("test", value)) + idx := bytes.Index(b, ([]byte)("test=")) + cont := bytes.Contains(b[idx+5:], []byte{'"'}) + if cont != q { + if q { + t.Errorf("quoting expected for: %#v", value) + } else { + t.Errorf("quoting not expected for: %#v", value) + } + } + } + + checkQuoting(false, "abcd") + checkQuoting(false, "v1.0") + checkQuoting(false, "1234567890") + checkQuoting(true, "/foobar") + checkQuoting(true, "x y") + checkQuoting(true, "x,y") + checkQuoting(false, errors.New("invalid")) + checkQuoting(true, errors.New("invalid argument")) +} + +func TestTimestampFormat(t *testing.T) { + checkTimeStr := func(format string) { + customFormatter := &TextFormatter{DisableColors: true, TimestampFormat: format} + customStr, _ := customFormatter.Format(WithField("test", "test")) + timeStart := bytes.Index(customStr, ([]byte)("time=")) + timeEnd := bytes.Index(customStr, ([]byte)("level=")) + timeStr := customStr[timeStart+5 : timeEnd-1] + if timeStr[0] == '"' && timeStr[len(timeStr)-1] == '"' { + timeStr = timeStr[1 : len(timeStr)-1] + } + if format == "" { + format = time.RFC3339 + } + _, e := time.Parse(format, (string)(timeStr)) + if e != nil { + t.Errorf("time string \"%s\" did not match provided time format \"%s\": %s", timeStr, format, e) + } + } + + checkTimeStr("2006-01-02T15:04:05.000000000Z07:00") + checkTimeStr("Mon Jan _2 15:04:05 2006") + checkTimeStr("") +} + +// TODO add tests for sorting etc., this requires a parser for the text +// formatter output. diff --git a/vendor/github.com/Unknwon/com/.gitignore b/vendor/github.com/Unknwon/com/.gitignore new file mode 100644 index 0000000000..0da157fe9c --- /dev/null +++ b/vendor/github.com/Unknwon/com/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.iml diff --git a/vendor/github.com/Unknwon/com/.travis.yml b/vendor/github.com/Unknwon/com/.travis.yml new file mode 100644 index 0000000000..882eb2d122 --- /dev/null +++ b/vendor/github.com/Unknwon/com/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - tip + +install: go get -v -t + +notifications: + email: + - u@gogs.io \ No newline at end of file diff --git a/vendor/github.com/Unknwon/com/cmd_test.go b/vendor/github.com/Unknwon/com/cmd_test.go new file mode 100644 index 0000000000..97ad78f2e5 --- /dev/null +++ b/vendor/github.com/Unknwon/com/cmd_test.go @@ -0,0 +1,140 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "fmt" + "runtime" + "strings" + "testing" +) + +func TestColorLogS(t *testing.T) { + if runtime.GOOS != "windows" { + // Trace + path. 
+ cls := ColorLogS("[TRAC] Trace level test with path( %s )", "/path/to/somethere") + clsR := fmt.Sprintf( + "[\033[%dmTRAC%s] Trace level test with path(\033[%dm%s%s)", + Blue, EndColor, Yellow, "/path/to/somethere", EndColor) + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Error + error. + cls = ColorLogS("[ERRO] Error level test with error[ %s ]", "test error") + clsR = fmt.Sprintf( + "[\033[%dmERRO%s] Error level test with error[\033[%dm%s%s]", + Red, EndColor, Red, "test error", EndColor) + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Warning + highlight. + cls = ColorLogS("[WARN] Warnning level test with highlight # %s #", "special offer!") + clsR = fmt.Sprintf( + "[\033[%dmWARN%s] Warnning level test with highlight \033[%dm%s%s", + Magenta, EndColor, Gray, "special offer!", EndColor) + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Success. + cls = ColorLogS("[SUCC] Success level test") + clsR = fmt.Sprintf( + "[\033[%dmSUCC%s] Success level test", + Green, EndColor) + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Default. + cls = ColorLogS("[INFO] Default level test") + clsR = fmt.Sprintf( + "[INFO] Default level test") + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + } else { + // Trace + path. + cls := ColorLogS("[TRAC] Trace level test with path( %s )", "/path/to/somethere") + clsR := fmt.Sprintf( + "[TRAC] Trace level test with path(%s)", + "/path/to/somethere") + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Error + error. + cls = ColorLogS("[ERRO] Error level test with error[ %s ]", "test error") + clsR = fmt.Sprintf( + "[ERRO] Error level test with error[%s]", + "test error") + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Warning + highlight. + cls = ColorLogS("[WARN] Warnning level test with highlight # %s #", "special offer!") + clsR = fmt.Sprintf( + "[WARN] Warnning level test with highlight %s", + "special offer!") + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Success. + cls = ColorLogS("[SUCC] Success level test") + clsR = fmt.Sprintf( + "[SUCC] Success level test") + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + + // Default. 
+ cls = ColorLogS("[INFO] Default level test") + clsR = fmt.Sprintf( + "[INFO] Default level test") + if cls != clsR { + t.Errorf("ColorLogS:\n Expect => %s\n Got => %s\n", clsR, cls) + } + } +} + +func TestExecCmd(t *testing.T) { + stdout, stderr, err := ExecCmd("go", "help", "get") + if err != nil { + t.Errorf("ExecCmd:\n Expect => %v\n Got => %v\n", nil, err) + } else if len(stderr) != 0 { + t.Errorf("ExecCmd:\n Expect => %s\n Got => %s\n", "", stderr) + } else if !strings.HasPrefix(stdout, "usage: go get") { + t.Errorf("ExecCmd:\n Expect => %s\n Got => %s\n", "usage: go get", stdout) + } +} + +func BenchmarkColorLogS(b *testing.B) { + log := fmt.Sprintf( + "[WARN] This is a tesing log that should be colored, path( %s ),"+ + " highlight # %s #, error [ %s ].", + "path to somewhere", "highlighted content", "tesing error") + for i := 0; i < b.N; i++ { + ColorLogS(log) + } +} + +func BenchmarkExecCmd(b *testing.B) { + for i := 0; i < b.N; i++ { + ExecCmd("go", "help", "get") + } +} diff --git a/vendor/github.com/Unknwon/com/convert_test.go b/vendor/github.com/Unknwon/com/convert_test.go new file mode 100644 index 0000000000..e97e27f42e --- /dev/null +++ b/vendor/github.com/Unknwon/com/convert_test.go @@ -0,0 +1,56 @@ +// Copyright 2014 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestHexStr2int(t *testing.T) { + Convey("Convert hex format string to decimal", t, func() { + hexDecs := map[string]int{ + "1": 1, + "002": 2, + "011": 17, + "0a1": 161, + "35e": 862, + } + + for hex, dec := range hexDecs { + val, err := HexStr2int(hex) + So(err, ShouldBeNil) + So(val, ShouldEqual, dec) + } + }) +} + +func TestInt2HexStr(t *testing.T) { + Convey("Convert decimal to hex format string", t, func() { + decHexs := map[int]string{ + 1: "1", + 2: "2", + 17: "11", + 161: "a1", + 862: "35e", + } + + for dec, hex := range decHexs { + val := Int2HexStr(dec) + So(val, ShouldEqual, hex) + } + }) +} diff --git a/vendor/github.com/Unknwon/com/dir_test.go b/vendor/github.com/Unknwon/com/dir_test.go new file mode 100644 index 0000000000..5d9a92bb66 --- /dev/null +++ b/vendor/github.com/Unknwon/com/dir_test.go @@ -0,0 +1,56 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "os" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestIsDir(t *testing.T) { + Convey("Check if given path is a directory", t, func() { + Convey("Pass a file name", func() { + So(IsDir("file.go"), ShouldEqual, false) + }) + Convey("Pass a directory name", func() { + So(IsDir("testdata"), ShouldEqual, true) + }) + Convey("Pass a invalid path", func() { + So(IsDir("foo"), ShouldEqual, false) + }) + }) +} + +func TestCopyDir(t *testing.T) { + Convey("Items of two slices should be same", t, func() { + _, err := StatDir("testdata", true) + So(err, ShouldEqual, nil) + + err = CopyDir("testdata", "testdata2") + So(err, ShouldEqual, nil) + + _, err = StatDir("testdata2", true) + os.RemoveAll("testdata2") + So(err, ShouldEqual, nil) + }) +} + +func BenchmarkIsDir(b *testing.B) { + for i := 0; i < b.N; i++ { + IsDir("file.go") + } +} diff --git a/vendor/github.com/Unknwon/com/example_test.go b/vendor/github.com/Unknwon/com/example_test.go new file mode 100644 index 0000000000..bc1930bf84 --- /dev/null +++ b/vendor/github.com/Unknwon/com/example_test.go @@ -0,0 +1,299 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com_test + +import ( + "fmt" + "io/ioutil" + "net/http" + + "github.com/Unknwon/com" +) + +// ------------------------------ +// cmd.go +// ------------------------------ + +func ExampleColorLogS() { + coloredLog := com.ColorLogS(fmt.Sprintf( + "[WARN] This is a tesing log that should be colored, path( %s ),"+ + " highlight # %s #, error [ %s ].", + "path to somewhere", "highlighted content", "tesing error")) + fmt.Println(coloredLog) +} + +func ExampleColorLog() { + com.ColorLog(fmt.Sprintf( + "[WARN] This is a tesing log that should be colored, path( %s ),"+ + " highlight # %s #, error [ %s ].", + "path to somewhere", "highlighted content", "tesing error")) +} + +func ExampleExecCmd() { + stdout, stderr, err := com.ExecCmd("go", "help", "get") + fmt.Println(stdout, stderr, err) +} + +// ------------- END ------------ + +// ------------------------------ +// html.go +// ------------------------------ + +func ExampleHtml2JS() { + htm := "
<div id=\"button\" class=\"btn\">Click me</div>
\n\r" + js := string(com.Html2JS([]byte(htm))) + fmt.Println(js) + // Output:
<div id=\"button\" class=\"btn\">Click me</div>
\n +} + +// ------------- END ------------ + +// ------------------------------ +// path.go +// ------------------------------ + +func ExampleGetGOPATHs() { + gps := com.GetGOPATHs() + fmt.Println(gps) +} + +func ExampleGetSrcPath() { + srcPath, err := com.GetSrcPath("github.com/Unknwon/com") + if err != nil { + fmt.Println(err) + return + } + fmt.Println(srcPath) +} + +func ExampleHomeDir() { + hd, err := com.HomeDir() + fmt.Println(hd, err) +} + +// ------------- END ------------ + +// ------------------------------ +// file.go +// ------------------------------ + +func ExampleIsFile() { + if com.IsFile("file.go") { + fmt.Println("file.go exists") + return + } + fmt.Println("file.go is not a file or does not exist") +} + +func ExampleIsExist() { + if com.IsExist("file.go") { + fmt.Println("file.go exists") + return + } + fmt.Println("file.go does not exist") +} + +// ------------- END ------------ + +// ------------------------------ +// dir.go +// ------------------------------ + +func ExampleIsDir() { + if com.IsDir("files") { + fmt.Println("directory 'files' exists") + return + } + fmt.Println("'files' is not a directory or does not exist") +} + +// ------------- END ------------ + +// ------------------------------ +// string.go +// ------------------------------ + +func ExampleIsLetter() { + fmt.Println(com.IsLetter('1')) + fmt.Println(com.IsLetter('[')) + fmt.Println(com.IsLetter('a')) + fmt.Println(com.IsLetter('Z')) + // Output: + // false + // false + // true + // true +} + +func ExampleExpand() { + match := map[string]string{ + "domain": "gowalker.org", + "subdomain": "github.com", + } + s := "http://{domain}/{subdomain}/{0}/{1}" + fmt.Println(com.Expand(s, match, "Unknwon", "gowalker")) + // Output: http://gowalker.org/github.com/Unknwon/gowalker +} + +// ------------- END ------------ + +// ------------------------------ +// http.go +// ------------------------------ + +func ExampleHttpGet() ([]byte, error) { + rc, err := com.HttpGet(&http.Client{}, "http://gowalker.org", nil) + if err != nil { + return nil, err + } + p, err := ioutil.ReadAll(rc) + rc.Close() + return p, err +} + +func ExampleHttpGetBytes() ([]byte, error) { + p, err := com.HttpGetBytes(&http.Client{}, "http://gowalker.org", nil) + return p, err +} + +func ExampleHttpGetJSON() interface{} { + j := com.HttpGetJSON(&http.Client{}, "http://gowalker.org", nil) + return j +} + +type rawFile struct { + name string + rawURL string + data []byte +} + +func (rf *rawFile) Name() string { + return rf.name +} + +func (rf *rawFile) RawUrl() string { + return rf.rawURL +} + +func (rf *rawFile) Data() []byte { + return rf.data +} + +func (rf *rawFile) SetData(p []byte) { + rf.data = p +} + +func ExampleFetchFiles() { + // Code that should be outside of your function body. + // type rawFile struct { + // name string + // rawURL string + // data []byte + // } + + // func (rf *rawFile) Name() string { + // return rf.name + // } + + // func (rf *rawFile) RawUrl() string { + // return rf.rawURL + // } + + // func (rf *rawFile) Data() []byte { + // return rf.data + // } + + // func (rf *rawFile) SetData(p []byte) { + // rf.data = p + // } + + files := []com.RawFile{ + &rawFile{rawURL: "http://example.com"}, + &rawFile{rawURL: "http://example.com/foo"}, + } + err := com.FetchFiles(&http.Client{}, files, nil) + fmt.Println(err, len(files[0].Data()), len(files[1].Data())) +} + +func ExampleFetchFilesCurl() { + // Code that should be outside of your function body. 
+ // type rawFile struct { + // name string + // rawURL string + // data []byte + // } + + // func (rf *rawFile) Name() string { + // return rf.name + // } + + // func (rf *rawFile) RawUrl() string { + // return rf.rawURL + // } + + // func (rf *rawFile) Data() []byte { + // return rf.data + // } + + // func (rf *rawFile) SetData(p []byte) { + // rf.data = p + // } + + files := []com.RawFile{ + &rawFile{rawURL: "http://example.com"}, + &rawFile{rawURL: "http://example.com/foo"}, + } + err := com.FetchFilesCurl(files) + fmt.Println(err, len(files[0].Data()), len(files[1].Data())) +} + +// ------------- END ------------ + +// ------------------------------ +// regex.go +// ------------------------------ + +func ExampleIsEmail() { + fmt.Println(com.IsEmail("test@example.com")) + fmt.Println(com.IsEmail("@example.com")) + // Output: + // true + // false +} + +func ExampleIsUrl() { + fmt.Println(com.IsUrl("http://example.com")) + fmt.Println(com.IsUrl("http//example.com")) + // Output: + // true + // false +} + +// ------------- END ------------ + +// ------------------------------ +// slice.go +// ------------------------------ + +func ExampleAppendStr() { + s := []string{"a"} + s = com.AppendStr(s, "a") + s = com.AppendStr(s, "b") + fmt.Println(s) + // Output: [a b] +} + +// ------------- END ------------ diff --git a/vendor/github.com/Unknwon/com/file_test.go b/vendor/github.com/Unknwon/com/file_test.go new file mode 100644 index 0000000000..4ff00dcd2d --- /dev/null +++ b/vendor/github.com/Unknwon/com/file_test.go @@ -0,0 +1,61 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestIsFile(t *testing.T) { + if !IsFile("file.go") { + t.Errorf("IsExist:\n Expect => %v\n Got => %v\n", true, false) + } + + if IsFile("testdata") { + t.Errorf("IsExist:\n Expect => %v\n Got => %v\n", false, true) + } + + if IsFile("files.go") { + t.Errorf("IsExist:\n Expect => %v\n Got => %v\n", false, true) + } +} + +func TestIsExist(t *testing.T) { + Convey("Check if file or directory exists", t, func() { + Convey("Pass a file name that exists", func() { + So(IsExist("file.go"), ShouldEqual, true) + }) + Convey("Pass a directory name that exists", func() { + So(IsExist("testdata"), ShouldEqual, true) + }) + Convey("Pass a directory name that does not exist", func() { + So(IsExist(".hg"), ShouldEqual, false) + }) + }) +} + +func BenchmarkIsFile(b *testing.B) { + for i := 0; i < b.N; i++ { + IsFile("file.go") + } +} + +func BenchmarkIsExist(b *testing.B) { + for i := 0; i < b.N; i++ { + IsExist("file.go") + } +} diff --git a/vendor/github.com/Unknwon/com/html_test.go b/vendor/github.com/Unknwon/com/html_test.go new file mode 100644 index 0000000000..2c9f404787 --- /dev/null +++ b/vendor/github.com/Unknwon/com/html_test.go @@ -0,0 +1,35 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "testing" +) + +func TestHtml2JS(t *testing.T) { + htm := "
<div id=\"button\" class=\"btn\">Click me</div>
\n\r" + js := string(Html2JS([]byte(htm))) + jsR := `
<div id=\"button\" class=\"btn\">Click me</div>
\n` + if js != jsR { + t.Errorf("Html2JS:\n Expect => %s\n Got => %s\n", jsR, js) + } +} + +func BenchmarkHtml2JS(b *testing.B) { + htm := "
<div id=\"button\" class=\"btn\">Click me</div>
\n\r" + for i := 0; i < b.N; i++ { + Html2JS([]byte(htm)) + } +} diff --git a/vendor/github.com/Unknwon/com/http_test.go b/vendor/github.com/Unknwon/com/http_test.go new file mode 100644 index 0000000000..902b6607a5 --- /dev/null +++ b/vendor/github.com/Unknwon/com/http_test.go @@ -0,0 +1,111 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "io/ioutil" + "net/http" + "strings" + "testing" +) + +var examplePrefix = ` + + + Example Domain +` + +func TestHttpGet(t *testing.T) { + // 200. + rc, err := HttpGet(&http.Client{}, "http://example.com", nil) + if err != nil { + t.Fatalf("HttpGet:\n Expect => %v\n Got => %s\n", nil, err) + } + p, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("HttpGet:\n Expect => %v\n Got => %s\n", nil, err) + } + s := string(p) + if !strings.HasPrefix(s, examplePrefix) { + t.Errorf("HttpGet:\n Expect => %s\n Got => %s\n", examplePrefix, s) + } +} + +func TestHttpGetBytes(t *testing.T) { + p, err := HttpGetBytes(&http.Client{}, "http://example.com", nil) + if err != nil { + t.Errorf("HttpGetBytes:\n Expect => %v\n Got => %s\n", nil, err) + } + s := string(p) + if !strings.HasPrefix(s, examplePrefix) { + t.Errorf("HttpGet:\n Expect => %s\n Got => %s\n", examplePrefix, s) + } +} + +func TestHttpGetJSON(t *testing.T) { + +} + +type rawFile struct { + name string + rawURL string + data []byte +} + +func (rf *rawFile) Name() string { + return rf.name +} + +func (rf *rawFile) RawUrl() string { + return rf.rawURL +} + +func (rf *rawFile) Data() []byte { + return rf.data +} + +func (rf *rawFile) SetData(p []byte) { + rf.data = p +} + +func TestFetchFiles(t *testing.T) { + files := []RawFile{ + &rawFile{rawURL: "http://example.com"}, + &rawFile{rawURL: "http://example.com"}, + } + err := FetchFiles(&http.Client{}, files, nil) + if err != nil { + t.Errorf("FetchFiles:\n Expect => %v\n Got => %s\n", nil, err) + } else if len(files[0].Data()) != 1270 { + t.Errorf("FetchFiles:\n Expect => %d\n Got => %d\n", 1270, len(files[0].Data())) + } else if len(files[1].Data()) != 1270 { + t.Errorf("FetchFiles:\n Expect => %d\n Got => %d\n", 1270, len(files[1].Data())) + } +} + +func TestFetchFilesCurl(t *testing.T) { + files := []RawFile{ + &rawFile{rawURL: "http://example.com"}, + &rawFile{rawURL: "http://example.com"}, + } + err := FetchFilesCurl(files) + if err != nil { + t.Errorf("FetchFilesCurl:\n Expect => %v\n Got => %s\n", nil, err) + } else if len(files[0].Data()) != 1270 { + t.Errorf("FetchFilesCurl:\n Expect => %d\n Got => %d\n", 1270, len(files[0].Data())) + } else if len(files[1].Data()) != 1270 { + t.Errorf("FetchFilesCurl:\n Expect => %d\n Got => %d\n", 1270, len(files[1].Data())) + } +} diff --git a/vendor/github.com/Unknwon/com/math_test.go b/vendor/github.com/Unknwon/com/math_test.go new file mode 100644 index 0000000000..587eb19c3d --- /dev/null +++ b/vendor/github.com/Unknwon/com/math_test.go @@ -0,0 +1,44 @@ +// Copyright 2015 com authors +// +// Licensed under the Apache License, Version 2.0 
(the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "math" + "math/rand" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Pow(t *testing.T) { + Convey("Power int", t, func() { + for x := 0; x < 10; x++ { + for y := 0; y < 8; y++ { + result := PowInt(x, y) + result_float := math.Pow(float64(x), float64(y)) + So(result, ShouldEqual, int(result_float)) + } + } + }) +} + +func BenchmarkPow(b *testing.B) { + x := rand.Intn(100) + y := rand.Intn(6) + b.ResetTimer() + for n := 0; n < b.N; n++ { + PowInt(x, y) + } +} diff --git a/vendor/github.com/Unknwon/com/path_test.go b/vendor/github.com/Unknwon/com/path_test.go new file mode 100644 index 0000000000..da5488d440 --- /dev/null +++ b/vendor/github.com/Unknwon/com/path_test.go @@ -0,0 +1,67 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "os" + "runtime" + "testing" +) + +func TestGetGOPATHs(t *testing.T) { + var gpsR []string + + if runtime.GOOS != "windows" { + gpsR = []string{"path/to/gopath1", "path/to/gopath2", "path/to/gopath3"} + os.Setenv("GOPATH", "path/to/gopath1:path/to/gopath2:path/to/gopath3") + } else { + gpsR = []string{"path/to/gopath1", "path/to/gopath2", "path/to/gopath3"} + os.Setenv("GOPATH", "path\\to\\gopath1;path\\to\\gopath2;path\\to\\gopath3") + } + + gps := GetGOPATHs() + if !CompareSliceStr(gps, gpsR) { + t.Errorf("GetGOPATHs:\n Expect => %s\n Got => %s\n", gpsR, gps) + } +} + +func TestGetSrcPath(t *testing.T) { + +} + +func TestHomeDir(t *testing.T) { + _, err := HomeDir() + if err != nil { + t.Errorf("HomeDir:\n Expect => %v\n Got => %s\n", nil, err) + } +} + +func BenchmarkGetGOPATHs(b *testing.B) { + for i := 0; i < b.N; i++ { + GetGOPATHs() + } +} + +func BenchmarkGetSrcPath(b *testing.B) { + for i := 0; i < b.N; i++ { + GetSrcPath("github.com/Unknwon/com") + } +} + +func BenchmarkHomeDir(b *testing.B) { + for i := 0; i < b.N; i++ { + HomeDir() + } +} diff --git a/vendor/github.com/Unknwon/com/regex_test.go b/vendor/github.com/Unknwon/com/regex_test.go new file mode 100644 index 0000000000..b027bd1c8d --- /dev/null +++ b/vendor/github.com/Unknwon/com/regex_test.go @@ -0,0 +1,70 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+	"testing"
+)
+
+func TestIsEmail(t *testing.T) {
+	emails := map[string]bool{
+		`test@example.com`:             true,
+		`single-character@b.org`:       true,
+		`uncommon_address@test.museum`: true,
+		`local@sld.UPPER`:              true,
+		`@missing.org`:                 false,
+		`missing@.com`:                 false,
+		`missing@qq.`:                  false,
+		`wrong-ip@127.1.1.1.26`:        false,
+	}
+	for e, r := range emails {
+		b := IsEmail(e)
+		if b != r {
+			t.Errorf("IsEmail:\n Expect => %v\n Got => %v\n", r, b)
+		}
+	}
+}
+
+func TestIsUrl(t *testing.T) {
+	urls := map[string]bool{
+		"http://www.example.com":                     true,
+		"http://example.com":                         true,
+		"http://example.com?user=test&password=test": true,
+		"http://example.com?user=test#login":         true,
+		"ftp://example.com":                          true,
+		"https://example.com":                        true,
+		"htp://example.com":                          false,
+		"http//example.com":                          false,
+		"http://example":                             true,
+	}
+	for u, r := range urls {
+		b := IsUrl(u)
+		if b != r {
+			t.Errorf("IsUrl:\n Expect => %v\n Got => %v\n", r, b)
+		}
+	}
+}
+
+func BenchmarkIsEmail(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		IsEmail("test@example.com")
+	}
+}
+
+func BenchmarkIsUrl(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		IsUrl("http://example.com")
+	}
+}
diff --git a/vendor/github.com/Unknwon/com/slice_test.go b/vendor/github.com/Unknwon/com/slice_test.go
new file mode 100644
index 0000000000..6a94156c23
--- /dev/null
+++ b/vendor/github.com/Unknwon/com/slice_test.go
@@ -0,0 +1,99 @@
+// Copyright 2013 com authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package com
+
+import (
+	"fmt"
+	"testing"
+
+	. 
"github.com/smartystreets/goconvey/convey" +) + +func TestAppendStr(t *testing.T) { + Convey("Append a string to a slice with no duplicates", t, func() { + s := []string{"a"} + + Convey("Append a string that does not exist in slice", func() { + s = AppendStr(s, "b") + So(len(s), ShouldEqual, 2) + }) + + Convey("Append a string that does exist in slice", func() { + s = AppendStr(s, "b") + So(len(s), ShouldEqual, 2) + }) + }) +} + +func TestCompareSliceStr(t *testing.T) { + Convey("Compares two 'string' type slices with elements and order", t, func() { + Convey("Compare two slices that do have same elements and order", func() { + So(CompareSliceStr( + []string{"1", "2", "3"}, []string{"1", "2", "3"}), ShouldBeTrue) + }) + + Convey("Compare two slices that do have same elements but does not have same order", func() { + So(!CompareSliceStr( + []string{"2", "1", "3"}, []string{"1", "2", "3"}), ShouldBeTrue) + }) + + Convey("Compare two slices that have different number of elements", func() { + So(!CompareSliceStr( + []string{"2", "1"}, []string{"1", "2", "3"}), ShouldBeTrue) + }) + }) +} + +func TestCompareSliceStrU(t *testing.T) { + Convey("Compare two 'string' type slices with elements and ignore the order", t, func() { + Convey("Compare two slices that do have same elements and order", func() { + So(CompareSliceStrU( + []string{"1", "2", "3"}, []string{"1", "2", "3"}), ShouldBeTrue) + }) + + Convey("Compare two slices that do have same elements but does not have same order", func() { + So(CompareSliceStrU( + []string{"2", "1", "3"}, []string{"1", "2", "3"}), ShouldBeTrue) + }) + + Convey("Compare two slices that have different number of elements", func() { + So(!CompareSliceStrU( + []string{"2", "1"}, []string{"1", "2", "3"}), ShouldBeTrue) + }) + }) +} + +func BenchmarkAppendStr(b *testing.B) { + s := []string{"a"} + for i := 0; i < b.N; i++ { + s = AppendStr(s, fmt.Sprint(b.N%3)) + } +} + +func BenchmarkCompareSliceStr(b *testing.B) { + s1 := []string{"1", "2", "3"} + s2 := []string{"1", "2", "3"} + for i := 0; i < b.N; i++ { + CompareSliceStr(s1, s2) + } +} + +func BenchmarkCompareSliceStrU(b *testing.B) { + s1 := []string{"1", "4", "2", "3"} + s2 := []string{"1", "2", "3", "4"} + for i := 0; i < b.N; i++ { + CompareSliceStrU(s1, s2) + } +} diff --git a/vendor/github.com/Unknwon/com/string_test.go b/vendor/github.com/Unknwon/com/string_test.go new file mode 100644 index 0000000000..114897887c --- /dev/null +++ b/vendor/github.com/Unknwon/com/string_test.go @@ -0,0 +1,108 @@ +// Copyright 2013 com authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package com + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestIsLetter(t *testing.T) { + if IsLetter('1') { + t.Errorf("IsLetter:\n Expect => %v\n Got => %v\n", false, true) + } + + if IsLetter('[') { + t.Errorf("IsLetter:\n Expect => %v\n Got => %v\n", false, true) + } + + if !IsLetter('a') { + t.Errorf("IsLetter:\n Expect => %v\n Got => %v\n", true, false) + } + + if !IsLetter('Z') { + t.Errorf("IsLetter:\n Expect => %v\n Got => %v\n", true, false) + } +} + +func TestExpand(t *testing.T) { + match := map[string]string{ + "domain": "gowalker.org", + "subdomain": "github.com", + } + s := "http://{domain}/{subdomain}/{0}/{1}" + sR := "http://gowalker.org/github.com/Unknwon/gowalker" + if Expand(s, match, "Unknwon", "gowalker") != sR { + t.Errorf("Expand:\n Expect => %s\n Got => %s\n", sR, s) + } +} + +func TestReverse(t *testing.T) { + if Reverse("abcdefg") != "gfedcba" { + t.Errorf("Reverse:\n Except => %s\n Got =>%s\n", "gfedcba", Reverse("abcdefg")) + } + if Reverse("上善若水厚德载物") != "物载德厚水若善上" { + t.Errorf("Reverse:\n Except => %s\n Got =>%s\n", "物载德厚水若善上", Reverse("上善若水厚德载物")) + } +} + +func Test_ToSnakeCase(t *testing.T) { + cases := map[string]string{ + "HTTPServer": "http_server", + "_camelCase": "_camel_case", + "NoHTTPS": "no_https", + "Wi_thF": "wi_th_f", + "_AnotherTES_TCaseP": "_another_tes_t_case_p", + "ALL": "all", + "_HELLO_WORLD_": "_hello_world_", + "HELLO_WORLD": "hello_world", + "HELLO____WORLD": "hello____world", + "TW": "tw", + "_C": "_c", + + " sentence case ": "__sentence_case__", + " Mixed-hyphen case _and SENTENCE_case and UPPER-case": "_mixed_hyphen_case__and_sentence_case_and_upper_case", + } + Convey("Convert string into snake case", t, func() { + for old, new := range cases { + So(ToSnakeCase(old), ShouldEqual, new) + } + }) +} + +func BenchmarkIsLetter(b *testing.B) { + for i := 0; i < b.N; i++ { + IsLetter('a') + } +} + +func BenchmarkExpand(b *testing.B) { + match := map[string]string{ + "domain": "gowalker.org", + "subdomain": "github.com", + } + s := "http://{domain}/{subdomain}/{0}/{1}" + for i := 0; i < b.N; i++ { + Expand(s, match, "Unknwon", "gowalker") + } +} + +func BenchmarkReverse(b *testing.B) { + s := "abscef中文" + for i := 0; i < b.N; i++ { + Reverse(s) + } +} diff --git a/vendor/github.com/alyu/configparser/configparser_test.go b/vendor/github.com/alyu/configparser/configparser_test.go new file mode 100644 index 0000000000..c199f26bf0 --- /dev/null +++ b/vendor/github.com/alyu/configparser/configparser_test.go @@ -0,0 +1,483 @@ +package configparser + +import ( + "bufio" + "log" + "os" + "os/exec" + "strings" + "testing" +) + +var gConfig *Configuration + +const ( + ConfigFilePath = "/tmp/configparser_test.ini" + ConfigFilePathSHA = "b0b06a0a48d952899fe68addd9f699d2043d045f" + ConfigNewFilePath = "/tmp/configparser_test_new.ini" + ConfigNewFilePathSHA = "a1a3f9acbebed990cbbb573d0f2b6fa9aa8ae5f0" + + SectionName1 = "MYSQLD DEFAULT" + SectionName2 = "MONGODB" + SectionName3 = "NDB_MGMD DEFAULT" + SectionNameRegex = "webservers$" + + Key1 = "TotalSendBufferMemory" + Key2 = "DefaultOperationRedoProblemAction" + Key3 = "innodb_buffer_pool_size" + Key4 = "innodb_buffer_pool_instances" + + Key5 = "datadir" + Key6 = "smallfiles" + + ConfigFileContent = `wsrep_provider_options="gcache.size=128M; evs.keepalive_period=PT3S; evs.inactive_check_period=PT10S; evs.suspect_timeout=PT30S; evs.inactive_timeout=PT1M; evs.consensus_timeout=PT1M; evs.send_window=1024; evs.user_send_window=512;" +# global settings +SendBufferMemory = 20M +ReceiveBufferMemory = 20M + 
+[dc1.webservers] +10.10.10.10 +20.20.20.20 +dc1.backup.local + +[dc2.database] +30.30.30.30 +40.40.40.40 +dc2.standby.local + +[dc2.webservers] +30.30.30.30 +40.40.40.40 + +[TCP DEFAULT] +#SendBufferMemory=20M +#ReceiveBufferMemory=20M + +[NDBD DEFAULT] +NoOfReplicas=2 +DataDir=/data/mysql/cluster/dev +FileSystemPath=/data/mysql/cluster/dev +#FileSystemPathDD= +#FileSystemPathDataFiles= +#FileSystemPathUndoFiles= +#BackupDataDir= +#InitialLogFileGroup=name=lg1;undo_buffer_size:64M;undo1.log:64M +#InitialTablespace=name=ts1;extent_size:1M;data1.dat:256M;data2.dat:256M + +DataMemory:256M +IndexMemory:32M +DiskPageBufferMemory:64M +SharedGlobalMemory=128M +RedoBuffer=48M +TotalSendBufferMemory=20M + +LockPagesInMainMemory=1 +Numa=0 + +RealtimeScheduler=1 +MaxNoOfExecutionThreads=4 +#LockExecuteThreadToCPU= +#LockMaintThreadsToCPU= +DiskIOThreadPool=2 + +BuildIndexThreads=2 +TwoPassInitialNodeRestartCopy=1 + +DiskCheckpointSpeedInRestart=100M +DiskCheckpointSpeed=10M + +FragmentLogFileSize=256M +NoOfFragmentLogFiles=6 +InitFragmentLogFiles=SPARSE + +ODirect=1 +;CompressedBackup=0 +;CompressedLCP=0 +Diskless=0 + +TimeBetweenLocalCheckpoints=20 +TimeBetweenGlobalCheckpoints=2000 +TimeBetweenEpochs=100 +;This parameter defines a timeout for synchronization epochs for MySQL Cluster Replication. If a node fails to participate in a global checkpoint within the time determined by this parameter, the node is shut down +#TimeBetweenEpochsTimeout=4000 +# Set in production +#TimeBetweenInactiveTransactionAbortCheck=1000 +#TransactionDeadlockDetectionTimeout=1200 +#TransactionInactiveTimeout=0 +# Might need to increase initial check for large data memory allocations +#TimeBetweenWatchDogCheckInitial = 6000 +#TimeBetweenWatchDogCheck= 6000 + +MaxNoOfConcurrentOperations=250000 +MaxNoOfConcurrentScans=500 +#MaxNoOfLocalScans=2048 + +#MaxNoOfConcurrentScans=256 (2-500) +#MaxNoOfLocalScans=numOfDataNodes*MaxNoOfConcurrentScans +# 1-992 +#BatchSizePerLocalScan=900 +#MaxParallelScansPerFragment=256 (1-1G) + +# % of max value +StringMemory=25 +MaxNoOfTables=2048 +MaxNoOfOrderedIndexes=1024 +MaxNoOfUniqueHashIndexes=1024 +MaxNoOfAttributes=8192 +MaxNoOfTriggers=8192 + +#MemReportFrequency=10 +StartupStatusReportFrequency=10 + +### Params for setting logging +LogLevelStartup=15 +LogLevelShutdown=15 +LogLevelCheckpoint=8 +LogLevelNodeRestart=15 +LogLevelCongestion=15 +LogLevelStatistic=15 + +### Params for increasing Disk throughput +BackupDataBufferSize=16M +BackupLogBufferSize=16M +BackupMemory=32M +#If BackupDataBufferSize and BackupLogBufferSize taken together exceed the default value for BackupMemory, then this parameter must be set explicitly in the config.ini file to their sum. 
+BackupWriteSize=256K +BackupMaxWriteSize=1M +BackupReportFrequency=10 + +### CGE 6.3 - REALTIME EXTENSIONS +#RealTimeScheduler=1 +#SchedulerExecutionTimer=80 +#SchedulerSpinTimer=40 + +RedoOverCommitCounter=3 +RedoOverCommitLimit=20 + +StartFailRetryDelay=0 +MaxStartFailRetries=3 + +[NDB_MGMD DEFAULT] +PortNumber=1186 +DataDir=/data/mysql/cluster/dev +#MaxNoOfSavedEvents=100 +TotalSendBufferMemory=4M + +[NDB_MGMD] +NodeId=1 +HostName=localhost +PortNumber=1186 +ArbitrationRank=1 + +#[NDB_MGMD] +#NodeId=2 +#HostName=localhost +#PortNumber=1187 +#ArbitrationRank=1 + +[NDBD] +NodeId=10 +HostName=localhost +#HeartbeatOrder=10 + +[NDBD] +NodeId=11 +HostName=localhost +#HeartbeatOrder=20 + +[NDBD] +NodeId=12 +HostName=localhost +#HeartbeatOrder=20 +NodeGroup=65536 + +[NDBD] +NodeId=13 +HostName=localhost +#HeartbeatOrder=20 +NodeGroup=65536 + +[NDBD] +NodeId=14 +HostName=localhost +#HeartbeatOrder=20 +NodeGroup=65536 + +[NDBD] +NodeId=15 +HostName=localhost +#HeartbeatOrder=20 +NodeGroup=65536 + +# +# Note=The following can be MySQLD connections or +# NDB API application connecting to the cluster +# +[MYSQLD DEFAULT] +TotalSendBufferMemory=10M +DefaultOperationRedoProblemAction=ABORT +#DefaultOperationRedoProblemAction=QUEUE +#BatchByteSize=32K (1024-1M) +# 1-992 +#BatchSize=900 +#MaxScanBatchSize=256K (32K-16M) +; this is another comment +[MYSQLD] +NodeId=100 +HostName=localhost + +[API] +NodeId=101 +[API] +NodeId=102 +[API] +NodeId=103 +[API] +NodeId=104 +[API] +NodeId=105 +[API] +NodeId=106 +[API] +NodeId=107 +[API] +NodeId=108 +[API] +NodeId=109 +[API] +NodeId=110 +[API] +NodeId=111 + +[API] +NodeId=200 + +[API] +NodeId=201 +[API] +NodeId=202 +[API] +NodeId=203 +[API] +NodeId=204 +[API] +NodeId=205 +[API] +NodeId=206 +[API] +NodeId=207 +[API] +NodeId=208 +[API] +NodeId=209 +[API] +NodeId=210 +[API] +NodeId=211 + +[API] +NodeId=212 +[API] +NodeId=213 +[API] +NodeId=214 +[API] +NodeId=215 +[API] +NodeId=216 +[API] +NodeId=217 +[API] +NodeId=218 +[API] +NodeId=219 +[API] +NodeId=220 +[API] +NodeId=221 +[API] +NodeId=222 +` +) + +func TestWriteTestConfigFile(t *testing.T) { + t.Log("Writing test config to" + ConfigFilePath) + f, err := os.Create(ConfigFilePath) + if err != nil { + t.Fatal(err) + } + + defer func() { + err = f.Close() + }() + + w := bufio.NewWriter(f) + defer func() { + err = w.Flush() + }() + + w.WriteString(ConfigFileContent) +} + +func TestReadConfigFile(t *testing.T) { + t.Log("Reading test config " + ConfigFilePath) + + var err error + gConfig, err = Read(ConfigFilePath) + if err != nil { + t.Fatal(err) + } + + t.Log(gConfig) +} + +func TestGetSection(t *testing.T) { + s, err := getConfig().Section("NDBD DEFAULT") + if err != nil { + t.Error(err) + } + + t.Log(s) +} + +func TestGetSections(t *testing.T) { + s, err := getConfig().Sections("NDBD") + if err != nil { + t.Error(err) + } + + t.Log(s) +} + +func TestSetNewValue(t *testing.T) { + s, err := getConfig().Section(SectionName1) + if err != nil { + t.Error(err) + } + + t.Logf("%s=%s\n", Key1, s.ValueOf(Key1)) + oldValue := s.SetValueFor(Key1, "512M") + t.Logf("New: %s=%s\n", Key1, s.ValueOf(Key1)) + if oldValue == s.ValueOf(Key1) { + t.Error("Unable to change value for key " + s.ValueOf(Key1)) + } +} + +func TestAddOption(t *testing.T) { + s, err := getConfig().Section(SectionName1) + if err != nil { + t.Error(err) + } + + testAddOption(s, Key3, "128G", t) + testAddOption(s, Key4, "16", t) + + testAddOption(s, Key3, "64G", t) + testAddOption(s, Key4, "8", t) +} + +func TestDeleteOption(t *testing.T) { + s, err := 
getConfig().Section(SectionName1) + if err != nil { + t.Error(err) + } + + testDeleteOption(s, Key2, t) +} + +func TestNotExistsOption(t *testing.T) { + s, err := getConfig().Section(SectionName1) + if err != nil { + t.Error(err) + } + + if s.Exists("none_existing_key") { + t.Error("non-existing key found") + } +} + +func TestNewSection(t *testing.T) { + s := getConfig().NewSection(SectionName2) + s.Add(Key5, "/var/lib/mongodb") + s.Add(Key6, "true") +} + +func TestGetNewSections(t *testing.T) { + s, err := getConfig().Section(SectionName2) + if err != nil { + t.Error(err) + } + if !s.Exists(Key5) { + t.Error(Key5 + " does not exist") + } + + if !s.Exists(Key6) { + t.Error(Key6 + " does not exist") + } + + t.Log(s) +} + +func TestDeleteSection(t *testing.T) { + c := getConfig() + sections, err := c.Delete(SectionName3) + if err != nil { + t.Error(err) + } + for _, s := range sections { + t.Log(s) + } +} + +func TestFindSection(t *testing.T) { + c := getConfig() + sections, err := c.Find(SectionNameRegex) + if err != nil { + t.Error(err) + } + for _, s := range sections { + t.Log(s) + } +} + +func TestSaveNewConfigFile(t *testing.T) { + c := getConfig() + + err := Save(c, ConfigNewFilePath) + if err != nil { + t.Fatal(err) + } +} + +func TestSHA(t *testing.T) { + out, err := exec.Command("shasum", ConfigNewFilePath).Output() + if err != nil { + t.Fatal(err) + } + sha := strings.Split(string(out), " ") + t.Logf("%v=%v", sha[0], ConfigNewFilePathSHA) + if sha[0] != ConfigNewFilePathSHA { + t.Error(ConfigNewFilePath + " shasum does not match!") + } +} + +func getConfig() *Configuration { + if gConfig == nil { + log.Println("No configuration instance!") + os.Exit(1) + } + return gConfig +} + +func testAddOption(s *Section, name string, value string, t *testing.T) { + oldValue := s.Add(name, value) + t.Logf("%s=%s, old value: %s\n", name, s.ValueOf(name), oldValue) + if oldValue == s.ValueOf(name) { + t.Error("Unable to change value for key " + name) + } +} + +func testDeleteOption(s *Section, name string, t *testing.T) { + oldValue := s.Delete(name) + t.Logf("%s=%s\n", name, oldValue) +} diff --git a/vendor/github.com/alyu/configparser/example_test.go b/vendor/github.com/alyu/configparser/example_test.go new file mode 100644 index 0000000000..90aef9db7c --- /dev/null +++ b/vendor/github.com/alyu/configparser/example_test.go @@ -0,0 +1,73 @@ +package configparser_test + +import ( + "fmt" + "github.com/alyu/configparser" + "log" +) + +// Read and modify a configuration file +func Example() { + config, err := configparser.Read("/etc/config.ini") + if err != nil { + log.Fatal(err) + } + // Print the full configuration + fmt.Println(config) + + // get a section + section, err := config.Section("MYSQLD DEFAULT") + if err != nil { + log.Fatal(err) + } else { + fmt.Printf("TotalSendBufferMemory=%s\n", section.ValueOf("TotalSendBufferMemory")) + + // set new value + var oldValue = section.SetValueFor("TotalSendBufferMemory", "256M") + fmt.Printf("TotalSendBufferMemory=%s, old value=%s\n", section.ValueOf("TotalSendBufferMemory"), oldValue) + + // delete option + oldValue = section.Delete("DefaultOperationRedoProblemAction") + fmt.Println("Deleted DefaultOperationRedoProblemAction: " + oldValue) + + // add new options + section.Add("innodb_buffer_pool_size", "64G") + section.Add("innodb_buffer_pool_instances", "8") + } + + // add a new section and options + section = config.NewSection("NDBD MGM") + section.Add("NodeId", "2") + section.Add("HostName", "10.10.10.10") +
section.Add("PortNumber", "1186") + section.Add("ArbitrationRank", "1") + + // find all sections ending with .webservers + sections, err := config.Find(".webservers$") + if err != nil { + log.Fatal(err) + } + for _, section := range sections { + fmt.Print(section) + } + // or + config.PrintSection("dc1.webservers") + + sections, err = config.Delete("NDB_MGMD DEFAULT") + if err != nil { + log.Fatal(err) + } + // deleted sections + for _, section := range sections { + fmt.Print(section) + } + + options := section.Options() + fmt.Println(options["HostName"]) + + // save the new config. the original will be renamed to /etc/config.ini.bak + err = configparser.Save(config, "/etc/config.ini") + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/apache/thrift/.clang-format b/vendor/github.com/apache/thrift/.clang-format new file mode 100644 index 0000000000..a62eef8576 --- /dev/null +++ b/vendor/github.com/apache/thrift/.clang-format @@ -0,0 +1,56 @@ +--- +Language: Cpp +# BasedOnStyle: LLVM +AccessModifierOffset: -2 +ConstructorInitializerIndentWidth: 2 +AlignEscapedNewlinesLeft: false +AlignTrailingComments: true +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Inline +AlwaysBreakTemplateDeclarations: true +AlwaysBreakBeforeMultilineStrings: true +BreakBeforeBinaryOperators: true +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BinPackParameters: false +ColumnLimit: 100 +ConstructorInitializerAllOnOneLineOrOnePerLine: true +DerivePointerAlignment: false +IndentCaseLabels: false +IndentWrappedFunctionNames: false +IndentFunctionDeclarationAfterType: false +MaxEmptyLinesToKeep: 1 +KeepEmptyLinesAtTheStartOfBlocks: true +NamespaceIndentation: None +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PenaltyBreakBeforeFirstCallParameter: 190 +PenaltyBreakComment: 300 +PenaltyBreakString: 10000 +PenaltyBreakFirstLessLess: 120 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 1200 +PointerAlignment: Left +SpacesBeforeTrailingComments: 1 +Cpp11BracedListStyle: true +Standard: Auto +IndentWidth: 2 +TabWidth: 4 +UseTab: Never +BreakBeforeBraces: Attach +SpacesInParentheses: false +SpacesInAngles: false +SpaceInEmptyParentheses: false +SpacesInCStyleCastParentheses: false +SpacesInContainerLiterals: true +SpaceBeforeAssignmentOperators: true +ContinuationIndentWidth: 4 +CommentPragmas: '^ IWYU pragma:' +ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ] +SpaceBeforeParens: ControlStatements +DisableFormat: false +... + diff --git a/vendor/github.com/apache/thrift/.dockerignore b/vendor/github.com/apache/thrift/.dockerignore new file mode 100644 index 0000000000..2d2ecd68da --- /dev/null +++ b/vendor/github.com/apache/thrift/.dockerignore @@ -0,0 +1 @@ +.git/ diff --git a/vendor/github.com/apache/thrift/.editorconfig b/vendor/github.com/apache/thrift/.editorconfig new file mode 100755 index 0000000000..3611762c8a --- /dev/null +++ b/vendor/github.com/apache/thrift/.editorconfig @@ -0,0 +1,112 @@ +# +## Licensed to the Apache Software Foundation (ASF) under one +## or more contributor license agreements. See the NOTICE file +## distributed with this work for additional information +## regarding copyright ownership. 
The ASF licenses this file +## to you under the Apache License, Version 2.0 (the +## "License"); you may not use this file except in compliance +## with the License. You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, +## software distributed under the License is distributed on an +## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +## KIND, either express or implied. See the License for the +## specific language governing permissions and limitations +## under the License. +## +# + +# EditorConfig: http://editorconfig.org +# see doc/coding_standards.md + +root = true + +[*] +end_of_line = lf +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +# ActionScript +# [*.as] + +# C +# [*.c] + +# C++ +[*.cpp] +indent_style = space +indent_size = 2 + +# C-Sharp +# [*.cs] + +# D +# [*.d] + +# Erlang +# [*.erl] + +# Go-lang +[*.go] +indent_style = tab +indent_size = 8 + +# C header files +# [*.h] + +# Haskell +# [*.hs] + +# Haxe +# [*.hx] + +# Java +# [*.java] + +# Javascript +[*.js] +indent_style = space +indent_size = 2 + +# JSON +[*.json] +indent_style = space +indent_size = 2 + +# Lua +# [*.lua] + +[*.markdown] +indent_style = space +trim_trailing_whitespace = false + +[*.md] +indent_style = space +trim_trailing_whitespace = false + +# OCaml +# [*.ml] + +# Delphi Pascal +# [*.pas] + +# PHP +# [*.php] + +# Perl +# [*.pm] + +# Python +# [*.py] + +# Ruby +# [*.rb] + +# Typescript +# [*.ts] + +# XML +# [*.xml] diff --git a/vendor/github.com/apache/thrift/.gitattributes b/vendor/github.com/apache/thrift/.gitattributes new file mode 100644 index 0000000000..176a458f94 --- /dev/null +++ b/vendor/github.com/apache/thrift/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/apache/thrift/.gitignore b/vendor/github.com/apache/thrift/.gitignore new file mode 100644 index 0000000000..140c93b003 --- /dev/null +++ b/vendor/github.com/apache/thrift/.gitignore @@ -0,0 +1,326 @@ +# generic ignores +*.la +*.lo +*.o +*.deps +*.dirstamp +*.libs +*.log +*.trs +*.suo +*.pyc +*.cache +*.user +*.ipch +*.sdf +*.jar +*.exe +*.dll +*_ReSharper* +*.opensdf +*.swp +*.hi +*~ + +.*project +junit*.properties +.idea +gen-* +Makefile +Makefile.in +aclocal.m4 +acinclude.m4 +autom4te.cache +cmake-* +node_modules +compile +test-driver +erl_crash.dump + +.sonar +.DS_Store +.svn +.vagrant + +/contrib/.vagrant/ +/aclocal/libtool.m4 +/aclocal/lt*.m4 +/autoscan.log +/autoscan-*.log +/cmake_* +/compiler/cpp/compiler.VC.db +/compiler/cpp/compiler.VC.VC.opendb +/compiler/cpp/test/plugin/t_cpp_generator.cc +/compiler/cpp/src/thrift/plugin/plugin_constants.cpp +/compiler/cpp/src/thrift/plugin/plugin_constants.h +/compiler/cpp/src/thrift/plugin/plugin_types.cpp +/compiler/cpp/src/thrift/plugin/plugin_types.h +/compiler/cpp/test/*test +/compiler/cpp/test/thrift-gen-* +/compiler/cpp/src/thrift/thrift-bootstrap +/compiler/cpp/src/thrift/plugin/gen.stamp +/compiler/cpp/Debug +/compiler/cpp/Release +/compiler/cpp/src/thrift/libparse.a +/compiler/cpp/src/thrift/thriftl.cc +/compiler/cpp/src/thrift/thrifty.cc +/compiler/cpp/src/thrift/thrifty.hh +/compiler/cpp/src/thrift/windows/version.h +/compiler/cpp/thrift +/compiler/cpp/thriftl.cc +/compiler/cpp/thrifty.cc +/compiler/cpp/lex.yythriftl.cc +/compiler/cpp/thrifty.h +/compiler/cpp/thrifty.hh +/compiler/cpp/src/thrift/version.h +/config.* +/configure +/configure.lineno +/configure.scan +/contrib/fb303/config.cache +/contrib/fb303/config.log 
+/contrib/fb303/config.status +/contrib/fb303/configure +/contrib/fb303/cpp/libfb303.a +/contrib/fb303/java/build/ +/contrib/fb303/py/build/ +/contrib/fb303/py/fb303/FacebookService-remote +/contrib/fb303/py/fb303/FacebookService.py +/contrib/fb303/py/fb303/__init__.py +/contrib/fb303/py/fb303/constants.py +/contrib/fb303/py/fb303/ttypes.py +/depcomp +/install-sh +/lib/cpp/Debug/ +/lib/cpp/Debug-mt/ +/lib/cpp/Release/ +/lib/cpp/Release-mt/ +/lib/cpp/src/thrift/qt/moc_TQTcpServer.cpp +/lib/cpp/src/thrift/qt/moc__TQTcpServer.cpp +/lib/cpp/src/thrift/config.h +/lib/cpp/src/thrift/stamp-h2 +/lib/cpp/test/Benchmark +/lib/cpp/test/AllProtocolsTest +/lib/cpp/test/DebugProtoTest +/lib/cpp/test/DenseProtoTest +/lib/cpp/test/EnumTest +/lib/cpp/test/JSONProtoTest +/lib/cpp/test/OptionalRequiredTest +/lib/cpp/test/SecurityTest +/lib/cpp/test/SpecializationTest +/lib/cpp/test/ReflectionTest +/lib/cpp/test/RecursiveTest +/lib/cpp/test/TFDTransportTest +/lib/cpp/test/TFileTransportTest +/lib/cpp/test/TInterruptTest +/lib/cpp/test/TNonblockingServerTest +/lib/cpp/test/TPipedTransportTest +/lib/cpp/test/TServerIntegrationTest +/lib/cpp/test/TSocketInterruptTest +/lib/cpp/test/TransportTest +/lib/cpp/test/UnitTests +/lib/cpp/test/ZlibTest +/lib/cpp/test/OpenSSLManualInitTest +/lib/cpp/test/concurrency_test +/lib/cpp/test/link_test +/lib/cpp/test/processor_test +/lib/cpp/test/tests.xml +/lib/cpp/concurrency_test +/lib/cpp/*.pc +/lib/cpp/x64/Debug/ +/lib/cpp/x64/Debug-mt/ +/lib/cpp/x64/Release +/lib/cpp/x64/Release-mt +/lib/c_glib/*.gcda +/lib/c_glib/*.gcno +/lib/c_glib/*.loT +/lib/c_glib/src/thrift/config.h +/lib/c_glib/src/thrift/stamp-h3 +/lib/c_glib/test/*.gcno +/lib/c_glib/test/testwrapper.sh +/lib/c_glib/test/testwrapper-test* +/lib/c_glib/test/testapplicationexception +/lib/c_glib/test/testbinaryprotocol +/lib/c_glib/test/testcompactprotocol +/lib/c_glib/test/testbufferedtransport +/lib/c_glib/test/testcontainertest +/lib/c_glib/test/testdebugproto +/lib/c_glib/test/testfdtransport +/lib/c_glib/test/testframedtransport +/lib/c_glib/test/testmemorybuffer +/lib/c_glib/test/testoptionalrequired +/lib/c_glib/test/testsimpleserver +/lib/c_glib/test/teststruct +/lib/c_glib/test/testthrifttest +/lib/c_glib/test/testthrifttestclient +/lib/c_glib/test/testtransportsocket +/lib/c_glib/test/testserialization +/lib/c_glib/thriftc.pc +/lib/c_glib/thrift_c_glib.pc +/lib/csharp/**/bin/ +/lib/csharp/**/obj/ +/lib/csharp/src/packages +/lib/d/test/*.pem +/lib/d/libthriftd*.a +/lib/d/test/async_test +/lib/d/test/client_pool_test +/lib/d/test/serialization_benchmark +/lib/d/test/stress_test_server +/lib/d/test/thrift_test_client +/lib/d/test/thrift_test_server +/lib/d/test/transport_test +/lib/d/unittest/ +/lib/dart/coverage +/lib/dart/**/.packages +/lib/dart/**/packages +/lib/dart/**/.pub/ +/lib/dart/**/pubspec.lock +/lib/delphi/src/*.dcu +/lib/delphi/test/*.identcache +/lib/delphi/test/*.local +/lib/delphi/test/*.dcu +/lib/delphi/test/*.2007 +/lib/delphi/test/*.dproj +/lib/delphi/test/*.dproj +/lib/delphi/test/codegen/*.bat +/lib/delphi/test/skip/*.local +/lib/delphi/test/skip/*.identcache +/lib/delphi/test/skip/*.identcache +/lib/delphi/test/skip/*.dproj +/lib/delphi/test/skip/*.dproj +/lib/delphi/test/skip/*.2007 +/lib/delphi/test/serializer/*.identcache +/lib/delphi/test/serializer/*.dproj +/lib/delphi/test/serializer/*.local +/lib/delphi/test/serializer/*.2007 +/lib/delphi/test/serializer/*.dcu +/lib/delphi/test/multiplexed/*.dproj +/lib/delphi/test/multiplexed/*.2007 +/lib/delphi/test/multiplexed/*.local 
+/lib/delphi/test/multiplexed/*.identcache +/lib/delphi/test/multiplexed/*.dcu +/lib/delphi/test/typeregistry/*.2007 +/lib/delphi/test/typeregistry/*.dproj +/lib/delphi/test/typeregistry/*.identcache +/lib/delphi/test/typeregistry/*.local +/lib/delphi/test/typeregistry/*.dcu +/lib/erl/.generated +/lib/erl/.eunit +/lib/erl/ebin +/lib/erl/deps/ +/lib/erl/src/thrift.app.src +/lib/erl/test/*.hrl +/lib/erl/test/*.beam +/lib/haxe/test/bin +/lib/hs/dist +/lib/java/build +/lib/js/test/build +/lib/nodejs/coverage +/lib/nodejs/node_modules/ +/lib/perl/MANIFEST +/lib/perl/MYMETA.json +/lib/perl/MYMETA.yml +/lib/perl/Makefile-perl.mk +/lib/perl/blib +/lib/perl/pm_to_blib +/lib/py/build +/lib/py/thrift.egg-info/ +/lib/rb/Gemfile.lock +/lib/rb/debug_proto_test +/lib/rb/.config +/lib/rb/ext/conftest.dSYM/ +/lib/rb/ext/mkmf.log +/lib/rb/ext/thrift_native.bundle +/lib/rb/ext/thrift_native.so +/lib/rb/test/ +/lib/rb/thrift-*.gem +/lib/php/src/ext/thrift_protocol/Makefile.* +/lib/php/src/ext/thrift_protocol/build/ +/lib/php/src/ext/thrift_protocol/config.* +/lib/php/src/ext/thrift_protocol/configure +/lib/php/src/ext/thrift_protocol/configure.in +/lib/php/src/ext/thrift_protocol/install-sh +/lib/php/src/ext/thrift_protocol/libtool +/lib/php/src/ext/thrift_protocol/ltmain.sh +/lib/php/src/ext/thrift_protocol/missing +/lib/php/src/ext/thrift_protocol/mkinstalldirs +/lib/php/src/ext/thrift_protocol/modules/ +/lib/php/src/ext/thrift_protocol/php_thrift_protocol.lo +/lib/php/src/ext/thrift_protocol/run-tests.php +/lib/php/src/ext/thrift_protocol/thrift_protocol.la +/lib/php/src/ext/thrift_protocol/tmp-php.ini +/lib/php/src/packages/ +/lib/php/test/TEST-*.xml +/lib/php/test/packages/ +/lib/py/dist/ +/lib/erl/logs/ +/lib/go/test/gopath/ +/lib/go/test/ThriftTest.thrift +/libtool +/ltmain.sh +/missing +/node_modules/ +/stamp-h1 +/test/features/results.json +/test/results.json +/test/c_glib/test_client +/test/c_glib/test_server +/test/cpp/StressTest +/test/cpp/StressTestNonBlocking +/test/cpp/TestClient +/test/cpp/TestServer +/test/dart/**/.packages +/test/dart/**/packages +/test/dart/**/.pub/ +/test/dart/**/pubspec.lock +/test/log/ +/test/test.log +/test/erl/.generated +/test/erl/ebin +/test/go/bin/ +/test/go/ThriftTest.thrift +/test/go/gopath +/test/go/pkg/ +/test/go/src/code.google.com/ +/test/go/src/github.com/golang/ +/test/go/src/gen/ +/test/go/src/thrift +/test/haxe/bin +/test/hs/TestClient +/test/hs/TestServer +/test/py.twisted/_trial_temp/ +/test/rb/Gemfile.lock +/tutorial/cpp/TutorialClient +/tutorial/cpp/TutorialServer +/tutorial/c_glib/tutorial_client +/tutorial/c_glib/tutorial_server +/tutorial/csharp/CsharpServer/obj +/tutorial/csharp/CsharpServer/bin +/tutorial/csharp/CsharpClient/obj +/tutorial/csharp/CsharpClient/bin +/tutorial/d/async_client +/tutorial/d/client +/tutorial/d/server +/tutorial/dart/**/.packages +/tutorial/dart/**/packages +/tutorial/dart/**/.pub/ +/tutorial/dart/**/pubspec.lock +/tutorial/delphi/*.dsk +/tutorial/delphi/*.local +/tutorial/delphi/*.tvsconfig +/tutorial/delphi/DelphiClient/dcu +/tutorial/delphi/DelphiServer/dcu +/tutorial/delphi/DelphiClient/*.local +/tutorial/delphi/DelphiClient/*.identcache +/tutorial/delphi/DelphiServer/*.identcache +/tutorial/delphi/DelphiServer/*.local +/tutorial/go/go-tutorial +/tutorial/go/calculator-remote +/tutorial/go/src/shared +/tutorial/go/src/tutorial +/tutorial/go/src/git.apache.org +/tutorial/haxe/bin +/tutorial/hs/dist/ +/tutorial/java/build/ +/tutorial/js/build/ +/ylwrap diff --git a/vendor/github.com/apache/thrift/.travis.yml 
b/vendor/github.com/apache/thrift/.travis.yml new file mode 100644 index 0000000000..81a88c3ce0 --- /dev/null +++ b/vendor/github.com/apache/thrift/.travis.yml @@ -0,0 +1,199 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# build Apache Thrift on Travis CI - https://travis-ci.org/ + +sudo: required +dist: trusty + +services: + - docker + +install: + - (travis_wait ./build/docker/check_unmodified.sh $DISTRO && touch .unmodified) || true + - if [ ! -f .unmodified ]; then travis_retry travis_wait docker build -q -t thrift-build:$DISTRO build/docker/$DISTRO; fi + +script: + - docker run --net=host -e BUILD_LIBS="$BUILD_LIBS" $BUILD_ENV -v $(pwd):/thrift/src -it thrift-build:$DISTRO build/docker/scripts/$SCRIPT $BUILD_ARG + +env: + global: + - TEST_NAME="" + - SCRIPT="cmake.sh" + - BUILD_ARG="" + - BUILD_ENV="-e CC=clang -e CXX=clang++" + - DISTRO=ubuntu + - BUILD_LIBS="CPP C_GLIB HASKELL JAVA PYTHON TESTING TUTORIALS" # only meaningful for CMake builds + + matrix: + - TEST_NAME="Cross Language Tests (Binary and Header Protocols)" + SCRIPT="cross-test.sh" + BUILD_ARG="-'(binary|header)'" + BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" + + - TEST_NAME="Cross Language Tests (Debian) (Binary and Header Protocols)" + SCRIPT="cross-test.sh" + BUILD_ARG="-'(binary|header)'" + BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" + DISTRO=debian + + - TEST_NAME="Cross Language Tests (Compact and JSON Protocols)" + SCRIPT="cross-test.sh" + BUILD_ARG="-'(compact|json)'" + BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" + + - TEST_NAME="Cross Language Tests (Debian) (Compact and JSON Protocols)" + SCRIPT="cross-test.sh" + BUILD_ARG="-'(compact|json)'" + BUILD_ENV="-e CC=clang -e CXX=clang++ -e THRIFT_CROSSTEST_CONCURRENCY=4" + DISTRO=debian + + # TODO: Remove them once migrated to CMake + # Autotools builds + - TEST_NAME="C C++ C# D Erlang Haxe Go (automake)" + SCRIPT="autotools.sh" + BUILD_ARG="--without-dart --without-haskell --without-java --without-lua --without-nodejs --without-perl --without-php --without-php_extension --without-python --without-ruby" + + - TEST_NAME="C C++ - GCC (automake)" + SCRIPT="autotools.sh" + BUILD_ARG="--without-csharp --without-java --without-erlang --without-nodejs --without-lua --without-python --without-perl --without-php --without-php_extension --without-dart --without-ruby --without-haskell --without-go --without-haxe --without-d" + BUILD_ENV="-e CC=gcc -e CXX=g++" + + - TEST_NAME="Java Lua PHP Ruby Dart (automake)" + SCRIPT="autotools.sh" + BUILD_ARG="--without-cpp --without-haskell --without-c_glib --without-csharp --without-d --without-erlang --without-go --without-haxe --without-nodejs --without-python --without-perl" + + # These are flaky (due to 
cabal and npm network/server failures) and also have lengthy output + - TEST_NAME="Haskell Node.js Python Perl (automake)" + SCRIPT="autotools.sh" + BUILD_ARG="--without-cpp --without-c_glib --without-csharp --without-d --without-dart --without-erlang --without-go --without-haxe --without-java --without-lua --without-php --without-php_extension --without-ruby" + + # CMake build + - TEST_NAME="All" + + - TEST_NAME="All (Debian)" + DISTRO=debian + + - TEST_NAME="C C++ - GCC" + BUILD_LIBS="CPP C_GLIB TESTING TUTORIALS" + BUILD_ARG="-DWITH_PYTHON=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + BUILD_ENV="-e CC=gcc -e CXX=g++" + + - TEST_NAME="C++ (Boost Thread)" + BUILD_LIBS="CPP TESTING TUTORIALS" + BUILD_ARG="-DWITH_BOOSTTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + + - TEST_NAME="C++ (Boost Thread - GCC)" + BUILD_LIBS="CPP TESTING TUTORIALS" + BUILD_ARG="-DWITH_BOOSTTHREADS=ON -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + BUILD_ENV="-e CC=gcc -e CXX=g++" + + - TEST_NAME="C++ (Std Thread)" + BUILD_LIBS="CPP TESTING TUTORIALS" + BUILD_ARG="-DWITH_STDTHREADS=ON -DCMAKE_CXX_FLAGS='-std=c++11' -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + + - TEST_NAME="C++ (Std Thread - GCC)" + BUILD_LIBS="CPP TESTING TUTORIALS" + BUILD_ARG="-DWITH_STDTHREADS=ON -DCMAKE_CXX_FLAGS='-std=c++11' -DWITH_PYTHON=OFF -DWITH_C_GLIB=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + BUILD_ENV="-e CC=gcc -e CXX=g++" + + - TEST_NAME="Compiler (mingw)" + BUILD_LIBS="" + BUILD_ARG="-DCMAKE_TOOLCHAIN_FILE=../build/cmake/mingw32-toolchain.cmake -DBUILD_COMPILER=ON -DBUILD_LIBRARIES=OFF -DBUILD_TESTING=OFF -DBUILD_EXAMPLES=OFF" + BUILD_ENV="" + + - TEST_NAME="All - GCC (CentOS)" + BUILD_ENV="-e CC=gcc -e CXX=g++" + DISTRO=centos + + - TEST_NAME="C C++ - Clang (CentOS)" + BUILD_LIBS="CPP C_GLIB TESTING TUTORIALS" + BUILD_ARG="-DWITH_PYTHON=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + DISTRO=centos + + - TEST_NAME="Python 2.6 (CentOS 6)" + BUILD_LIBS="PYTHON TESTING TUTORIALS" + BUILD_ARG="-DWITH_PYTHON=ON -DWITH_CPP=OFF -DWITH_JAVA=OFF -DWITH_HASKELL=OFF" + BUILD_ENV="-e CC=gcc -e CXX=g++" + DISTRO=centos6 + + # Distribution + - TEST_NAME="make dist" + SCRIPT="make-dist.sh" + BUILD_ENV="-e CC=gcc -e CXX=g++" + + - TEST_NAME="Debian Packages" + SCRIPT="dpkg.sh" + BUILD_ENV="-e CC=gcc -e CXX=g++" + + - TEST_NAME="make dist (Debian)" + SCRIPT="make-dist.sh" + BUILD_ENV="-e CC=gcc -e CXX=g++" + DISTRO=debian + + - TEST_NAME="Debian Packages (Debian)" + SCRIPT="dpkg.sh" + BUILD_ENV="-e CC=gcc -e CXX=g++" + DISTRO=debian + +matrix: + include: + # QA jobs for code analytics and metrics + # + # C/C++ static code analysis with cppcheck + # add --error-exitcode=1 to --enable=all as soon as everything is fixed + # + # Python code style check with flake8 + # + # search for TODO etc within source tree + # some statistics about the code base + # some info about the build machine + - env: TEST_NAME="cppcheck, flake8, TODO FIXME HACK, LoC and system info" + install: + - travis_retry sudo apt-get update + - travis_retry sudo apt-get install -ym cppcheck sloccount python-flake8 + script: + # Compiler cppcheck (All) + - cppcheck --force --quiet --inline-suppr --enable=all -j2 compiler/cpp/src + # C++ cppcheck (All) + - cppcheck --force --quiet --inline-suppr --enable=all -j2 lib/cpp/src lib/cpp/test test/cpp tutorial/cpp + # C Glib cppcheck (All) + - cppcheck --force --quiet --inline-suppr --enable=all -j2 lib/c_glib/src lib/c_glib/test test/c_glib/src tutorial/c_glib + 
# Silent error checks + - cppcheck --force --quiet --inline-suppr --error-exitcode=1 -j2 compiler/cpp/src + - cppcheck --force --quiet --inline-suppr --error-exitcode=1 -j2 lib/cpp/src lib/cpp/test test/cpp tutorial/cpp + - cppcheck --force --quiet --inline-suppr --error-exitcode=1 -j2 lib/c_glib/src lib/c_glib/test test/c_glib/src tutorial/c_glib + # Python code style + - flake8 --ignore=E501 lib/py + - flake8 tutorial/py + - flake8 --ignore=E501 test/py + - flake8 test/py.twisted + - flake8 test/py.tornado + - flake8 --ignore=E501 test/test.py + - flake8 --ignore=E501 test/crossrunner + - flake8 test/features + # TODO etc + - grep -r TODO * + - grep -r FIXME * + - grep -r HACK * + # LoC + - sloccount . + # System Info + - dpkg -l + - uname -a diff --git a/vendor/github.com/apache/thrift/CHANGES b/vendor/github.com/apache/thrift/CHANGES new file mode 100644 index 0000000000..7b674d6f65 --- /dev/null +++ b/vendor/github.com/apache/thrift/CHANGES @@ -0,0 +1,2366 @@ +Apache Thrift Changelog + +Thrift 0.10.0 +-------------------------------------------------------------------------------- +## Bug + * [THRIFT-1840] - Thrift Generated Code Causes Global Variable Leaks + * [THRIFT-1828] - moc_TQTcpServer.cpp was removed from source tree but is in thrift-0.9.0.tar.gz + * [THRIFT-1790] - cocoa: Duplicate interface definition error + * [THRIFT-1776] - TPipeServer should implement "listen", so that TServerEventHandler preServe will work right + * [THRIFT-1351] - Compiler does not care about binary strings + * [THRIFT-1229] - Python fastbinary.c can not handle unicode as generated python code + * [THRIFT-749] - C++ TBufferedTransports do not flush their buffers on delete + * [THRIFT-747] - C++ TSocket->close calls shutdown breaking forked parent process + * [THRIFT-732] - server exits abnormally when client calls send_xxx function without calling recv_xxx function + * [THRIFT-3942] - TSSLSocket does not honor send and receive timeouts + * [THRIFT-3941] - WinXP version of thrift_poll() relies on undefined behavior by passing a destructed variable to select() + * [THRIFT-3940] - Visual Studio project file for compiler is broken + * [THRIFT-3943] - Coverity Scan identified some high severity defects + * [THRIFT-3929] - PHP "nsglobal" Option Results in Syntax Error in Generated Code (Trailing Backslash) + * [THRIFT-3936] - Cannot compile 0.10.0 development tip with VS2013 and earlier (snprintf, uint32_t) + * [THRIFT-3935] - Incorrect skipping of map and set + * [THRIFT-3920] - Ruby: Ensuring that HTTP failures will clear the http transport outbuf var + * [THRIFT-3919] - C# TTLSServerSocket does not use clientTimeout + * [THRIFT-3917] - Check backports.ssl_match_hostname module version + * [THRIFT-3909] - Fix c_glib static lib CMake build + * [THRIFT-3904] - Typo in node tutorial leads to wrong transport being used + * [THRIFT-3848] - As an implementer of a perl socket server, I do not want to have to remember to ignore SIGCHLD for it to work properly + * [THRIFT-3844] - thrift_protocol cannot compile in 7.0.7 + * [THRIFT-3843] - integer issues with Haxe PHP targets cause ZigZag encoding to fail + * [THRIFT-3842] - Dart generates incorrect code for a const struct + * [THRIFT-3841] - dart compact protocol incorrectly serializes/deserialized doubles + * [THRIFT-3708] - NameError: global name 'TProtocol' is not defined + * [THRIFT-3704] - "TConnectedClient died: Could not refill buffer" message shown when using HTTP Server + * [THRIFT-3678] - Fix javadoc errors on JDK 8 + * [THRIFT-3014] - AppVeyor support + 
* [THRIFT-2994] - Node.js TJSONProtocol cannot be used for object serialization. + * [THRIFT-2974] - writeToParcel throws NPE for optional enum fields + * [THRIFT-2948] - Python TJSONProtocol doesn't handle structs with binary fields containing invalid unicode. + * [THRIFT-2845] - ChildService.Plo: No such file or directory + * [THRIFT-3276] - Binary data does not decode correctly using the TJSONProtocol when the base64 encoded data is padded. + * [THRIFT-3253] - Using latest version of D gives deprecation notices + * [THRIFT-2883] - TTwisted.py, during ConnectionLost processing: exceptions.RuntimeError: dictionary changed size during iteration + * [THRIFT-2019] - Writing on a disconnected socket on Mac causes SIG PIPE + * [THRIFT-2020] - Thrift library has some empty files that haven't really been deleted + * [THRIFT-2049] - Go compiler doesn't build on native Windows + * [THRIFT-2024] - TServer.cpp warns on 64-bit platforms about truncating an rlim_t into an int + * [THRIFT-2023] - gettimeofday implementation on Windows errors when no time zone is passed in. + * [THRIFT-2022] - CoB and dense code generation still uses TR1 bind, even though that doesn't work with clang + * [THRIFT-2027] - Minor 64-bit and NOMINMAX issues in C++ library + * [THRIFT-2156] - TServerSocket::listen() is throwing exceptions with misleading information + * [THRIFT-2154] - Missing #deepCopy should return T + * [THRIFT-3157] - TBase signature should be TBase, F extends TFieldIdEnum> + * [THRIFT-3156] - Node TLS: server executes processing logic two full times + * [THRIFT-3154] - tutorial/py.tornado throw EOF exception + * [THRIFT-3063] - C++ build -Wunused-parameter warnings on processor_test, TransportTest + * [THRIFT-3056] - Add string/collection length limits for Python protocol readers + * [THRIFT-3237] - Fix TNamedPipeServer::createNamedPipe memory leak + * [THRIFT-3233] - Fix C++ ThreadManager::Impl::removeWorker worker join + * [THRIFT-3232] - Cannot deserialize json messages created with fieldNamesAsString + * [THRIFT-3206] - Fix Visual Studio build failure due 'pthread_self': identifier not found + * [THRIFT-3200] - JS and nodejs do not encode JSON protocol binary fields as base64 + * [THRIFT-3199] - Exception field has basic metadata + * [THRIFT-3182] - TFramedTransport is in an invalid state after frame size exception + * [THRIFT-2536] - new TSocket, uninitialised value reported by valgrind + * [THRIFT-2527] - Apache Thrift IDL Compiler code generated for Node.js should be jshint clean + * [THRIFT-2519] - "processor" class is not being generated + * [THRIFT-2431] - TFileTransportTest fails with "check delta < XXX failed" + * [THRIFT-2708] - Erlang library does not support "oneway" message type + * [THRIFT-3377] - Deep copy is actually shallow when using typedef members + * [THRIFT-3376] - C# and Python JSON protocol double values lose precision + * [THRIFT-3373] - Various fixes for cross test servers and clients + * [THRIFT-3370] - errno extern variable redefined. 
Not compiling for Android + * [THRIFT-3379] - Potential out of range panic in Go JSON protocols + * [THRIFT-3371] - Abstract namespace Unix domain sockets broken in C++ + * [THRIFT-3380] - nodejs: 0.9.2 -> 0.9.3 upgrade breaks Protocol and Transport requires + * [THRIFT-3367] - Fix bad links to coding_standards.md #634 + * [THRIFT-3401] - Nested collections emit Objective-C code that cannot compile + * [THRIFT-3403] - JSON String reader doesn't recognize UTF-16 surrogate pairs + * [THRIFT-3362] - make check fails for C++ at the SecurityTest + * [THRIFT-3395] - Cocoa compiler produces corrupt code when boxing enums inside map. + * [THRIFT-3394] - compiler generates uncompilable code + * [THRIFT-3388] - hash doesn't work on set/list + * [THRIFT-3391] - Wrong bool formatting in test server + * [THRIFT-3390] - TTornado server doesn't handle closed connections properly + * [THRIFT-3382] - TBase class for C++ Library + * [THRIFT-3392] - Java TZlibTransport does not close its wrapper streams upon close() + * [THRIFT-3383] - i64 related warnings + * [THRIFT-3386] - misc. warnings with make check + * [THRIFT-3385] - warning: format ‘%lu’ expects ‘long unsigned int’, but has type ‘std::basic_string::size_type {aka unsigned int} + * [THRIFT-3355] - npm WARN package.json thrift@1.0.0-dev No license field. + * [THRIFT-3360] - Improve cross test servers and clients further + * [THRIFT-3359] - Binary field incompatibilities + * [THRIFT-3354] - Fix word-extraction substr bug in initialism code + * [THRIFT-3350] - Python JSON protocol does not encode binary as Base64 + * [THRIFT-3577] - assertion failed at line 512 of testcontainertest.c + * [THRIFT-3576] - Boost test --log_format arg does not accept lowercase + * [THRIFT-3575] - Go compiler tries to use unexported library methods when using read_write_private + * [THRIFT-3574] - Cocoa generator makes uncompilable imports + * [THRIFT-3570] - Remove duplicate instances that are added by upstream + * [THRIFT-3571] - Make feature test result browsable + * [THRIFT-3569] - c_glib protocols do not check number of bytes read by transport + * [THRIFT-3568] - THeader server crashes on readSlow + * [THRIFT-3567] - GLib-GObject-CRITICAL **: g_object_unref: assertion 'G_IS_OBJECT (object)' failed + * [THRIFT-3566] - C++/Qt: TQTcpServerTest::test_communicate() is never executed + * [THRIFT-3564] - C++/Qt: potential core dump in TQTcpServer in case an exception occurs in TAsyncProcessor::process() + * [THRIFT-3558] - typos in c_glib tests + * [THRIFT-3559] - Fix awkward extra semi-colons with Cocoa container literals + * [THRIFT-3555] - 'configure' script does not honor --with-openssl= for libcrypto for BN_init + * [THRIFT-3554] - Constant decls may lead to "Error: internal error: prepare_member_name_mapping() already active for different struct" + * [THRIFT-3552] - glib_c Memory Leak + * [THRIFT-3551] - Thrift perl library missing package declaration + * [THRIFT-3549] - Exceptions are not properly stringified in Perl library + * [THRIFT-3546] - NodeJS code should not be namespaced (and is currently not strict-mode compliant) + * [THRIFT-3545] - Container type literals do not compile + * [THRIFT-3538] - Remove UnboundMethodType in TProtocolDecorator + * [THRIFT-3536] - Error 'char' does not contain a definition for 'IsLowSurrogate' for WP7 target + * [THRIFT-3534] - Link error when building with Qt5 + * [THRIFT-3533] - Can not send nil pointer as service method argument + * [THRIFT-3507] - THttpClient does not use proxy from http_proxy, https_proxy environment variables 
+ * [THRIFT-3502] - C++ TServerSocket passes small buffer to getsockname + * [THRIFT-3501] - Forward slash in comment causes compiler error + * [THRIFT-3498] - C++ library assumes optional function pthread_attr_setschedpolicy is available + * [THRIFT-3497] - Build fails with "invalid use of incomplete type" + * [THRIFT-3496] - C++: Cob style client fails when sending a consecutive request + * [THRIFT-3493] - libthrift does not compile on windows using visual studio + * [THRIFT-3488] - warning: unused variable 'program' + * [THRIFT-3489] - warning: deprecated conversion from string constant to 'char*' [-Wwrite-strings] + * [THRIFT-3487] - Full support for newer Delphi versions + * [THRIFT-3528] - Fix warnings in thrift.ll + * [THRIFT-3527] - -gen py:dynamic,utf8strings ignores utf8strings option + * [THRIFT-3526] - Code generated by py:utf8strings does not work for Python3 + * [THRIFT-3524] - dcc32 warning "W1000 Symbol 'IsLowSurrogate' is deprecated: 'Use TCharHelper'" in Thrift.Protocol.JSON.pas + * [THRIFT-3525] - py:dynamic fails to handle binary list/set/map element + * [THRIFT-3521] - TSimpleJSONProtocolTest is not deterministic (fails when run on JDK 8) + * [THRIFT-3520] - Dart TSocket onError stream should be typed as Object + * [THRIFT-3519] - fastbinary does not work with -gen py:utf8strings + * [THRIFT-3518] - TConcurrentClientSyncInfo files were missing for Visual Studio + * [THRIFT-3512] - c_glib: Build fails due to missing features.h + * [THRIFT-3483] - Incorrect empty binary handling introduced by THRIFT-3359 + * [THRIFT-3479] - Oneway calls should not return exceptions to clients + * [THRIFT-3478] - Restore dropped method to THsHaServer.java + * [THRIFT-3477] - Parser fails on enum item that starts with 'E' letter and continues with number + * [THRIFT-3476] - Missing include in ./src/thrift/protocol/TJSONProtocol.cpp + * [THRIFT-3474] - Docker: thrift-compiler + * [THRIFT-3473] - When "optional' is used with a struct member, C++ server seems to not return it correctly + * [THRIFT-3468] - Dart TSocketTransport onError handler is too restrictive + * [THRIFT-3451] - thrift_protocol PHP extension missing config.m4 file + * [THRIFT-3456] - rounding issue in static assert + * [THRIFT-3455] - struct write method's return value is incorrect + * [THRIFT-3454] - Python Tornado tutorial is broken + * [THRIFT-3463] - Java can't be disabled in CMake build + * [THRIFT-3450] - NPE when using SSL + * [THRIFT-3449] - TBaseAsyncProcessor fb.responseReady() never called for oneway functions + * [THRIFT-3471] - Dart generator does not handle uppercase argument names + * [THRIFT-3470] - Sporadic timeouts with pipes + * [THRIFT-3465] - Go Code With Complex Const Initializer Compilation Depends On Struct Order + * [THRIFT-3464] - Fix several defects in c_glib code generator + * [THRIFT-3462] - Cocoa generates Incorrect #import header names + * [THRIFT-3453] - remove rat_exclude + * [THRIFT-3418] - Use of ciphers in ssl.wrap_socket() breaks python 2.6 compatibility + * [THRIFT-3417] - "namespace xsd" is not really working + * [THRIFT-3413] - Thrift code generation bug in Go when extending service + * [THRIFT-3420] - C++: TSSLSockets are not interruptable + * [THRIFT-3415] - include unistd.h conditionally + * [THRIFT-3414] - #include in THeaderTransport.h breaks windows build + * [THRIFT-3411] - Go generates remotes with wrong package qualifiers when including + * [THRIFT-3430] - Go THttpClient does not read HTTP response body to completion when closing + * [THRIFT-3423] - First call to 
thrift_transport:read_exact fails to dispatch correct function + * [THRIFT-3422] - Go TServerSocket doesn't close on Interrupt + * [THRIFT-3421] - rebar as dependency instead of bundling (was: rebar fails if PWD contains Unicode) + * [THRIFT-3428] - Go test fails when running make check + * [THRIFT-3445] - Throwable messages are hidden from JVM stack trace output + * [THRIFT-3443] - Thrift include can generate uncompilable code + * [THRIFT-3444] - Large 64 bit Integer does not preserve value through Node.js JSONProtocol + * [THRIFT-3436] - misc. cross test issues with UTF-8 path names + * [THRIFT-3435] - Put generated Java code for fullcamel tests in a separate package/namespace + * [THRIFT-3433] - Doubles aren't interpreted correctly + * [THRIFT-3437] - Mingw-w64 build fail + * [THRIFT-3434] - Dart generator produces empty name in pubspec.yaml for includes without namespaces + * [THRIFT-3408] - JSON generator emits incorrect types + * [THRIFT-3406] - Cocoa client should not schedule streams on main runloop + * [THRIFT-3404] - JSON String reader doesn't recognize UTF-16 surrogate pair + * [THRIFT-3636] - Double precision is not fully preserved in C++ TJSONProtocol + * [THRIFT-3632] - c_glib testserialization fails with glib assertion + * [THRIFT-3619] - Using Thrift 0.9.3 with googletest on Linux gcc 4.9 / C++11 + * [THRIFT-3617] - CMake does not build gv/xml generators + * [THRIFT-3615] - Fix Python SSL client resource leak on connection failure + * [THRIFT-3616] - lib/py/test/test_sslsocket.py is flaky + * [THRIFT-3643] - Perl SSL server crushes if a client disconnect without handshake + * [THRIFT-3639] - C# Thrift library forces TLS 1.0, thwarting TLS 1.2 usage + * [THRIFT-3633] - Travis "C C++ - GCC" build was using clang + * [THRIFT-3634] - Fix Python TSocket resource leak on connection failure + * [THRIFT-3630] - Debian/Ubuntu install docs need an update + * [THRIFT-3629] - Parser sets exitcode on errors, but generator does not + * [THRIFT-3608] - lib/cpp/test/SecurityTest is flaky in jenkins Thrift-precommit build. 
+ * [THRIFT-3601] - Better conformance to PEP8 for generated code + * [THRIFT-3599] - Validate client IP address against cert's SubjectAltName + * [THRIFT-3598] - TBufferedTransport doesn't instantiate client connection + * [THRIFT-3597] - `make check` hangs in go tests + * [THRIFT-3589] - Dart generator uses wrong name in constructor for uppercase arguments with defaults + * [THRIFT-3588] - Using TypeScript with --noImplicitAny fails + * [THRIFT-3584] - boolean false value cannot be transferred + * [THRIFT-3578] - Make THeaderTransport detect TCompact framed and unframed + * [THRIFT-3323] - Python library does not handle escaped forward slash ("/") in JSON + * [THRIFT-3322] - CMake generated "make check" failes on python_test + * [THRIFT-3321] - Thrift can't be added as a subdirectory of another CMake-based project + * [THRIFT-3314] - Dots in file names of includes causes dots in javascript variable names + * [THRIFT-3307] - Segfault in Ruby serializer + * [THRIFT-3309] - Missing TConstant.php in /lib/php/Makefile.am + * [THRIFT-3810] - unresolved external symbol public: virtual void __cdecl apache::thrift::server::TServerFramework::serve(void) + * [THRIFT-3736] - C++ library build fails if OpenSSL does not surrpot SSLv3 + * [THRIFT-3878] - Compile error in TSSLSocket.cpp with new OpenSSL [CRYPTO_num_locks] + * [THRIFT-3949] - missing make dist entry for compiler/cpp/test + * [THRIFT-449] - The wire format of the JSON Protocol may not always be valid JSON if it contains non-UTF8 encoded strings + * [THRIFT-162] - Thrift structures are unhashable, preventing them from being used as set elements + * [THRIFT-3961] - TConnectedClient does not terminate the connection to the client if an exception while processing the received message occures. + * [THRIFT-3881] - Travis CI builds are failing due to docker failures (three retries, and gives up) + * [THRIFT-3937] - Cannot compile 0.10.0 development tip with gcc-4.6.x + * [THRIFT-3964] - Unsupported mechanism type ????? due to dependency on default OS-dependent charset + * [THRIFT-3038] - Use of volatile in cpp library + * [THRIFT-3301] - Java generated code uses imports that can lead to class name collisions with IDL defined types + * [THRIFT-3348] - PHP TCompactProtocol bool&int64 readvalue bug + * [THRIFT-3955] - TThreadedServer Memory Leak + * [THRIFT-3829] - Thrift does not install Python Libraries if Twisted is not installed + * [THRIFT-3932] - C++ ThreadManager has a rare termination race + * [THRIFT-3828] - cmake fails when Boost_INCLUDE_DIRS (and other variables passed to include_directories()) is empty + * [THRIFT-3958] - CMake WITH_MT option for windows static runtime linking does not support the cmake build type RelWithDebInfo + * [THRIFT-3957] - TConnectedClient does not disconnect from clients when their timeout is reached. + * [THRIFT-3953] - TSSLSocket::close should handle exceptions from waitForEvent because it is called by the destructor. 
+ * [THRIFT-3977] - PHP extension creates undefined values when deserializing sets + * [THRIFT-3947] - sockaddr type isn't always large enough for the return of getsockname + * [THRIFT-2755] - ThreadSanitizer reports data race in ThreadManager::Impl::addWorker + * [THRIFT-3948] - errno is not the correct method of getting the error in windows + * [THRIFT-4008] - broken ci due to upstream dependency versioning break + * [THRIFT-3999] - Fix Debian & Ubuntu package dependencies + * [THRIFT-3886] - PHP cross test client returns 0 even when failing + * [THRIFT-3997] - building thrift libs does not support new openssl + +## Documentation + * [THRIFT-3867] - Specify BinaryProtocol and CompactProtocol + +## Epic + * [THRIFT-3049] - As an iOS developer, I want a generator and library that produces Swift code + * [THRIFT-2336] - UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + +## Improvement + * [THRIFT-1867] - Python client/server should support client-side certificates. + * [THRIFT-1313] - c_glib compact support + * [THRIFT-1385] - make install doesn't install java library in the setted folder + * [THRIFT-1437] - Update RPM spec + * [THRIFT-847] - Test Framework harmonization across all languages + * [THRIFT-819] - add Enumeration for protocol, transport and server types + * [THRIFT-3927] - Emit an error instead of throw an error in the async callback + * [THRIFT-3931] - TSimpleServer: If process request encounter UNKNOWN_METHOD, don't close transport. + * [THRIFT-3934] - Automatically resolve OpenSSL binary version on Windows CI + * [THRIFT-3918] - Run subset of make cross + * [THRIFT-3908] - Remove redundant dependencies from Dockerfile + * [THRIFT-3907] - Skip Docker image build on CI when unchanged + * [THRIFT-3868] - Java struct equals should do identity check before field comparison + * [THRIFT-3849] - Port Go serializer and deserializer to dart + * [THRIFT-2989] - Complete CMake build for Apache Thrift + * [THRIFT-2980] - ThriftMemoryBuffer doesn't have a constructor option to take an existing buffer + * [THRIFT-2856] - refactor erlang basic transports and unify interfaces + * [THRIFT-2877] - Optimize generated hashCode + * [THRIFT-2869] - JSON: run schema validation from tests + * [THRIFT-3112] - [Java] AsyncMethodCallback should be typed in generated AsyncIface + * [THRIFT-3263] - PHP jsonSerialize() should cast scalar types + * [THRIFT-2905] - Cocoa compiler should have option to produce "modern" Objective-C + * [THRIFT-2821] - Enable the use of custom HTTP-Header in the Transport + * [THRIFT-2093] - added the ability to set compression level in C++ zlib transport + * [THRIFT-2089] - Compiler ignores duplicate typenames + * [THRIFT-2056] - Moved all #include config.h statements to #include + * [THRIFT-2031] - Make SO_KEEPALIVE configurable for C++ lib + * [THRIFT-2021] - Improve large binary protocol string performance + * [THRIFT-2028] - Cleanup threading headers / libraries + * [THRIFT-2014] - Change C++ lib includes to use style throughout + * [THRIFT-2312] - travis.yml: build everything + * [THRIFT-1915] - Multiplexing Services + * [THRIFT-1736] - Visual Studio top level project files within msvc + * [THRIFT-1735] - integrate tutorial into regular build + * [THRIFT-1533] - Make TTransport should be Closeable + * [THRIFT-35] - Move language tests into their appropriate library directory + * [THRIFT-1079] - Support i64 in AS3 + * [THRIFT-1108] - SSL support for the Ruby library + * [THRIFT-3856] - update debian package deependencies + * [THRIFT-3833] - haxe http server 
implementation (by embeding into php web server) + * [THRIFT-3839] - Performance issue with big message deserialization using php extension + * [THRIFT-3820] - Erlang: Detect OTP >= 18 to use new time correction + * [THRIFT-3816] - Reduce docker build duration on Travis-CI + * [THRIFT-3815] - Put appveyor dependency versions to one place + * [THRIFT-3788] - Compatibility improvements and Win64 support + * [THRIFT-3792] - Timeouts for anonymous pipes should be configurable + * [THRIFT-3794] - Split Delphi application, protocol and transport exception subtypes into separate exceptions + * [THRIFT-3774] - The generated code should have exception_names meta info + * [THRIFT-3762] - Fix build warnings for deprecated Thrift "byte" fields + * [THRIFT-3756] - Improve requiredness documentation + * [THRIFT-3761] - Add debian package for Python3 + * [THRIFT-3742] - haxe php cli support + * [THRIFT-3733] - Socket timeout improvements + * [THRIFT-3728] - http transport for thrift-lua + * [THRIFT-3905] - Dart compiler does not initialize bool, int, and double properties + * [THRIFT-3911] - Loosen Ruby dev dependency version requirements + * [THRIFT-3906] - Run C# tests with make check + * [THRIFT-3900] - Add Python SSL flags + * [THRIFT-3897] - Provide meaningful exception type based on WebExceptionStatus in case of timeout + * [THRIFT-3808] - Missing `DOUBLE` in thrift type enumeration + * [THRIFT-3803] - Remove "file" attribute from XML generator + * [THRIFT-3660] - Add V4 mapped address to test client cert's altname + * [THRIFT-3661] - Use https to download meck in erlang test build + * [THRIFT-3659] - Check configure result of CMake on CI + * [THRIFT-3667] - Add TLS SNI support to clients + * [THRIFT-3651] - Make backports.match_hostname and ipaddress optional + * [THRIFT-3666] - Build D tutorial as part of Autotools build + * [THRIFT-3665] - Add D libevent and OpenSSL to docker images + * [THRIFT-3664] - Remove md5.c + * [THRIFT-3662] - Add Haskell to debian docker image + * [THRIFT-3711] - Add D to cross language test + * [THRIFT-3691] - Run flake8 Python style check on Travis-CI + * [THRIFT-3692] - (Re)enable Appveyor C++ and Python build + * [THRIFT-3677] - Improve CMake Java build + * [THRIFT-3679] - Add stdout log to testBinary in Java test server + * [THRIFT-3718] - Reduce size of docker image for build environment + * [THRIFT-3698] - [Travis-CI] Introduce retry to apt commands + * [THRIFT-3127] - switch -recurse to --recurse and reserve -r + * [THRIFT-3087] - Pass on errors like "connection closed" + * [THRIFT-3240] - Thrift Python client should support subjectAltName and wildcard certs in TSSLSocket + * [THRIFT-3213] - make cross should indicate when it skips a known failing test + * [THRIFT-3208] - Fix Visual Studio solution build failure due to missing source + * [THRIFT-3186] - Add TServerHTTP to Go library + * [THRIFT-2342] - Add __FILE__ and __LINE__ to Thrift C++ excpetions + * [THRIFT-3372] - Add dart generator to Visual Studio project + * [THRIFT-3366] - ThriftTest to implement standard return values + * [THRIFT-3402] - Provide a perl Unix Socket implementation + * [THRIFT-3361] - Improve C# library + * [THRIFT-3393] - Introduce i8 to provide consistent set of Thrift IDL integer types + * [THRIFT-3339] - Support for database/sql + * [THRIFT-3565] - C++: T[Async]Processor::getEventHandler() should be declared as const member functions + * [THRIFT-3563] - C++/Qt: removed usage of macro QT_PREPEND_NAMESPACE as it isn't consequently used for all references to Qt types. 
+ * [THRIFT-3562] - Removed unused TAsyncProcessor::getAsyncServer() + * [THRIFT-3561] - C++/Qt: make use of Q_DISABLE_COPY() to get rid of copy ctor and assignment operator + * [THRIFT-3556] - c_glib file descriptor transport + * [THRIFT-3544] - Make cross test fail when server process died unexpectedly + * [THRIFT-3540] - Make python tutorial more in line with PEP8 + * [THRIFT-3535] - Dart generator argument to produce a file structure usable in parent library + * [THRIFT-3505] - Enhance Python TSSLSocket + * [THRIFT-3506] - Eliminate old style classes from library code + * [THRIFT-3503] - Enable py:utf8string by default + * [THRIFT-3499] - Add package_prefix to python generator + * [THRIFT-3495] - Minor enhancements and fixes for cross test + * [THRIFT-3486] - Java generated `getFieldValue` is incompatible with `setFieldValue` for binary values. + * [THRIFT-3484] - Consolidate temporary buffers in Java's TCompactProtocol + * [THRIFT-3516] - Add feature test for THeader TBinaryProtocol interop + * [THRIFT-3515] - Python 2.6 compatibility and test on CI + * [THRIFT-3514] - PHP 7 compatible version of binary protocol + * [THRIFT-3469] - Docker: Debian support + * [THRIFT-3416] - Retire old "xxx_namespace" declarations from the IDL + * [THRIFT-3426] - Align autogen comment in XSD + * [THRIFT-3424] - Add CMake android build option + * [THRIFT-3439] - Run make cross using Python3 when available + * [THRIFT-3440] - Python make check takes too much time + * [THRIFT-3441] - Stabilize Travis-CI builds + * [THRIFT-3431] - Avoid "schemes" HashMap lookups during struct reads/writes + * [THRIFT-3432] - Add a TByteBuffer transport to the Java library + * [THRIFT-3438] - Enable py:new_style by default + * [THRIFT-3405] - Go THttpClient misuses http.Client objects + * [THRIFT-3614] - Improve logging of test_sslsocket.py + * [THRIFT-3647] - Fix php extension build warnings + * [THRIFT-3642] - Speed up cross test runner + * [THRIFT-3637] - Implement compact protocol for dart + * [THRIFT-3613] - Port Python C extension to Python 3 + * [THRIFT-3612] - Add Python C extension for compact protocol + * [THRIFT-3611] - Add --regex filter to cross test runner + * [THRIFT-3631] - JSON protocol implementation for Lua + * [THRIFT-3609] - Remove or replace TestPortFixture.h + * [THRIFT-3605] - Have the compiler complain about invalid arguments and options + * [THRIFT-3596] - Better conformance to PEP8 + * [THRIFT-3585] - Compact protocol implementation for Lua + * [THRIFT-3582] - Erlang libraries should have service metadata + * [THRIFT-3579] - Introduce retry to make cross + * [THRIFT-3306] - Java: TBinaryProtocol: Use 1 temp buffer instead of allocating 8 + * [THRIFT-3910] - Do not invoke pip as part of build process + * [THRIFT-1857] - Python 3.X Support + * [THRIFT-1944] - Binding to zero port + * [THRIFT-3954] - Enable the usage of structs called "Object" in Java + * [THRIFT-3981] - Enable analyzer strong mode in Dart library + * [THRIFT-3998] - Document ability to add custom tags to thrift structs + * [THRIFT-4006] - Add a removeEventListener method on TSocket + +## New Feature + * [THRIFT-640] - Support deprecation + * [THRIFT-948] - SSL socket support for PHP + * [THRIFT-764] - add Support for Vala language + * [THRIFT-3046] - Allow PSR4 class loading for generated classes (PHP) + * [THRIFT-2113] - Erlang SSL Socket Support + * [THRIFT-1482] - Unix domain socket support under PHP + * [THRIFT-519] - Support collections of types without having to explicitly define it + * [THRIFT-468] - Rack Middleware 
Application for Rails + * [THRIFT-1708] - Add event handlers for processor events + * [THRIFT-3834] - Erlang namespacing and exception metadata + * [THRIFT-2510] - Implement TNonblockingServer's ability to listen on unix domain sockets + * [THRIFT-3397] - Implement TProcessorFactory in C# to enable per-client processors + * [THRIFT-3523] - XML Generator + * [THRIFT-3510] - Add HttpTaskAsyncHandler implementation + * [THRIFT-3318] - PHP: SimpleJSONProtocol Implementation + * [THRIFT-3299] - Dart language bindings in Thrift + * [THRIFT-2835] - Add possibility to distribute generators separately from thrift core, and load them dynamically + * [THRIFT-184] - Add OSGi Manifest headers to the libthrift java library to be able to use Thrift in the OSGi runtime + * [THRIFT-141] - If a required field is not present on serialization, throw an exception + * [THRIFT-1891] - Add Windows ALPC transport which is right counterpart of Unix domain sockets + +## Question + * [THRIFT-1808] - The Thrift struct should be considered self-contained? + * [THRIFT-2895] - Tutorial cpp + * [THRIFT-3860] - Elephant-bird application Test fails for Thrift + * [THRIFT-3811] - HTTPS Support for C++ applications + * [THRIFT-3509] - "make check" error + +## Story + * [THRIFT-3452] - .travis.yml: Migrating from legacy to container-based infrastructure + +## Sub-task + * [THRIFT-1811] - ruby tutorial as part of the regular build + * [THRIFT-2779] - PHP TJSONProtocol encode unicode into UCS-4LE which can't be parsed by other language bindings + * [THRIFT-2110] - Erlang: Support for Multiplexing Services on any Transport, Protocol and Server + * [THRIFT-3852] - A Travis-CI job fails with "write error" + * [THRIFT-3740] - Fix haxelib.json classpath + * [THRIFT-3653] - incorrect union serialization + * [THRIFT-3652] - incorrect serialization of optionals + * [THRIFT-3655] - incorrect union serialization + * [THRIFT-3654] - incorrect serialization of optionals + * [THRIFT-3656] - incorrect serialization of optionals + * [THRIFT-3699] - Fix integer limit symbol includes in Python C extension + * [THRIFT-3693] - Fix include issue in C++ TSSLSocketInterruptTest on Windows + * [THRIFT-3694] - [Windows] Disable tests of a few servers that are not supported + * [THRIFT-3696] - Install pip to CentOS Docker images to fix Python builds + * [THRIFT-3638] - Fix haxelib.json + * [THRIFT-3251] - Add http transport for server to Go lib + * [THRIFT-2424] - Recursive Types + * [THRIFT-2423] - THeader + * [THRIFT-2413] - Python: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2409] - Java: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2412] - D: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2411] - C++: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2410] - JavaMe: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2668] - TestSuite: detailed result on passed tests by feature + * [THRIFT-2659] - python Test Server fails when throwing TException + * [THRIFT-3398] - Add CMake build for Haskell library and tests + * [THRIFT-3396] - DART: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-3364] - Fix ruby binary field encoding in TJSONProtocol + * [THRIFT-3381] - Fix for misc. codegen issues with THRIFT-2905 + * [THRIFT-3573] - No rule to make target `../../../test/c_glib/src/.deps/testthrifttest-thrift_test_handler.Po'. 
+ * [THRIFT-3572] - "Unable to determine the behavior of a signed right shift" + * [THRIFT-3542] - Add length limit support to Java test server + * [THRIFT-3537] - Remove the (now obsolete) csharp:asyncctp flag + * [THRIFT-3532] - Add configurable string and container read size limit to Python protocols + * [THRIFT-3531] - Create cross lang feature test for string and container read length limit + * [THRIFT-3482] - Haskell JSON protocol does not encode binary field as Base64 + * [THRIFT-3425] - Minor fixes + simplification for CentOS Dockerfile + * [THRIFT-3442] - Run CMake tests on Appveyor + * [THRIFT-3409] - NodeJS binary field issues + * [THRIFT-3621] - Fix lib/cpp/test/SecurityTest.cpp to use ephemeral ports + * [THRIFT-3628] - Fix lib/cpp/test/TServerIntegrationTest.cpp to use ephemeral ports + * [THRIFT-3625] - Kill unused #include "TestPortFixture.h" in lib/cpp/test/TServerTransportTest.cpp. + * [THRIFT-3646] - Fix Python extension build warnings + * [THRIFT-3626] - Fix lib/cpp/test/TSocketInterruptTest.cpp to use ephemeral ports. + * [THRIFT-3624] - Fix lib/cpp/test/TServerSocketTest.cpp to use ephemeral ports + * [THRIFT-3623] - Fix lib/cpp/test/TSSLSocketInterruptTest.cpp to use ephemeral ports + * [THRIFT-3592] - Add basic test client + * [THRIFT-3980] - add TExtendedBinaryProtocol.java + +## Task + * [THRIFT-1801] - Sync up TApplicationException codes across languages and thrift implementations + * [THRIFT-1259] - Automate versioning + +## Test + * [THRIFT-3400] - Add Erlang to cross test + * [THRIFT-3504] - Fix FastbinaryTest.py + +## Wish + * [THRIFT-3923] - Maybe remove Aereo from the "Powered by" list + * [THRIFT-2149] - Add an option to disable the generation of default operators + + + +Thrift 0.9.3 +-------------------------------------------------------------------------------- +## Bug + * [THRIFT-2441] - Cannot shutdown TThreadedServer when clients are still connected + * [THRIFT-2465] - TBinaryProtocolT breaks if copied/moved + * [THRIFT-2474] - thrift.h causes a compile failure + * [THRIFT-2540] - Running configure from outside the source directory fails + * [THRIFT-2598] - Add check for minimum Go version to configure.ac + * [THRIFT-2647] - compiler-hs: don't decapitalize field names, do decapitalize argument bindings + * [THRIFT-2773] - Generated Java code for 'oneway' methods is incorrect. + * [THRIFT-2789] - TNonblockingServer leaks socket FD's under load + * [THRIFT-2682] - TThreadedServer leaks per-thread memory + * [THRIFT-2674] - JavaScript: declare Accept: and Content-Type: in request + * [THRIFT-3078] - TNonblockingServerSocket's logger is not named after TNonblockingServerSocket + * [THRIFT-3077] - C++ TFileTransport ignores return code from ftruncate + * [THRIFT-3067] - C++ cppcheck performance related warnings + * [THRIFT-3066] - C++ TDenseProtocol assert modifies instead of checks + * [THRIFT-3071] - bootstrap.sh on Ubuntu 12.04 (Precise) automake error + * [THRIFT-3069] - C++ TServerSocket leaks socket on fcntl get or set flags error + * [THRIFT-3079] - TNonblockingServerSocket's logger is not named after TNonblockingServerSocket + * [THRIFT-3080] - C++ TNonblockingServer connection leak while accepting a huge number of connections.
+ * [THRIFT-3086] - C++ Valgrind Error Cleanup + * [THRIFT-3085] - thrift_reconnecting_client never tries to reconnect + * [THRIFT-3123] - Missing include in compiler/cpp/src/main.h breaks build in some environments + * [THRIFT-3125] - Fix the list of exported headers in automake input + * [THRIFT-3126] - PHP JSON serializer converts empty or int-indexed maps to lists + * [THRIFT-3132] - Properly format date in Java @Generated annotations + * [THRIFT-3137] - Travis build hangs after failure + * [THRIFT-3138] - "make check" parallel execution is nondeterministic + * [THRIFT-3139] - JS library test is flaky + * [THRIFT-3140] - ConcurrentModificationException is thrown by JavaScript test server + * [THRIFT-3124] - Some signed/unsigned warnings while building compiler + * [THRIFT-3128] - Go generated code produces name collisions between services + * [THRIFT-3146] - Graphviz generates function name collisions between services + * [THRIFT-3147] - Segfault while receiving data + * [THRIFT-3148] - Markdown links to coding_standards are dead + * [THRIFT-3090] - cmake build is broken on MacOSX + * [THRIFT-3097] - cmake targets unconditionally depend on optional libraries + * [THRIFT-3094] - master as of 2015-APR-13 fails -DBOOST_THREADS cmake build + * [THRIFT-3099] - cmake build is broken on FreeBSD + * [THRIFT-3089] - Assigning default ENUM values results in non-compilable java code if java namespace is not defined + * [THRIFT-3093] - mingw compile fixes for c++ library 0.9.2 + * [THRIFT-3098] - Thrift does not pretty print binary typedefs the way it does binary fields + * [THRIFT-3091] - c_glib service method should return result from handler method + * [THRIFT-3088] - TThreadPoolServer with Sasl auth may leak CLOSE_WAIT socket + * [THRIFT-3109] - Cross test log file cannot be browsed when served in HTTP server + * [THRIFT-3113] - m4 C++11 macro issue + * [THRIFT-3105] - C++ libthriftnb library on Windows build failure + * [THRIFT-3115] - Uncompileable code due to name collision with predefined used types + * [THRIFT-3117] - Java TSSLTransportFactory can't load certificates within JAR archive + * [THRIFT-3102] - could not make check for Go Library + * [THRIFT-3120] - Minor spelling errors and an outdated URL + * [THRIFT-3121] - Librt does not exist on OS X + * [THRIFT-3152] - Compiler error on Mac OSX (missing #include) + * [THRIFT-3162] - make fails for dmd 2.067 + * [THRIFT-3164] - Thrift C++ library SSL socket by default allows for insecure SSLv3 negotiation + * [THRIFT-3168] - Fix Maven POM + * [THRIFT-3170] - Initialism code in the Go compiler causes chaos + * [THRIFT-3169] - Do not export thrift.TestStruct and thrift.TestEnum in thrift Go library + * [THRIFT-3191] - Perl compiler does not add support for unexpected exception handling + * [THRIFT-3178] - glib C does not compile + * [THRIFT-3189] - Perl ServerSocket should allow a specific interface to be listened to + * [THRIFT-3252] - Missing TConcurrentClientSyncInfo.h in cpp Makefile, so doesn't install + * [THRIFT-3255] - Thrift generator doesn't exclude 'package' keyword for thrift property names breaking java builds + * [THRIFT-3260] - multiple warnings in c_glib tutorial + * [THRIFT-3256] - Some D test timings are too aggressive for slow machines + * [THRIFT-3257] - warning: extra tokens at end of #endif directive + * [THRIFT-3184] - Thrift Go leaves file descriptors open + * [THRIFT-3203] - DOAP - please fix "Ocaml" => "OCaml" + * [THRIFT-3210] - (uncompileable) code generated for server events while events are not enabled + *
[THRIFT-3215] - TJSONProtocol (c++) uses "throw new" to throw exceptions instead of "throw" + * [THRIFT-3202] - Allow HSHAServer to configure min and max worker threads separately. + * [THRIFT-3205] - TCompactProtocol returns a wrong error when io.EOF happens + * [THRIFT-3209] - LGPL mentioned in license file + * [THRIFT-3197] - keepAliveTime is hard coded as 60 sec in TThreadPoolServer + * [THRIFT-3196] - Misspelling in lua TBinaryProtocol (stirctWrite => strictWrite) + * [THRIFT-3198] - Allow construction of TTransportFactory with a specified maxLength + * [THRIFT-3192] - Go import paths changed in 1.4, and expired June 1 + * [THRIFT-3271] - Could not find or load main class configtest_ax_javac_and_java on some non-english systems + * [THRIFT-3273] - c_glib: Generated code tries to convert between function and void pointers + * [THRIFT-3264] - Fix Erlang 16 namespaced types + * [THRIFT-3270] - reusing TNonblockingServer::TConnection causes dirty TSocket + * [THRIFT-3267] - c_glib: "Critical" failure during unit tests + * [THRIFT-3277] - THttpClient leaks connections if it's used for multiple requests + * [THRIFT-3278] - NodeJS: Fix exception stack traces and names + * [THRIFT-3279] - Fix a bug in retry_max_delay (NodeJS) + * [THRIFT-3280] - Initialize retry variables on construction + * [THRIFT-3283] - c_glib: Tutorial server always exits with warning + * [THRIFT-3284] - c_glib: Empty service produces unused-variable warning + * [THRIFT-1925] - c_glib generated code does not compile + * [THRIFT-1849] - after transport->open() opens isOpen returns true and next open() goes through when it shall not + * [THRIFT-1866] - java compiler generates non-compiling code with const's defined in a thrift when name includes non-identifier chars + * [THRIFT-1938] - FunctionRunner.h -- uses wrong path for Thread.h when installed + * [THRIFT-1844] - Password string not cleared + * [THRIFT-2004] - Thrift::Union violates :== method contract and crashes + * [THRIFT-2073] - Thrift C++ THttpClient error: cannot refill buffer + * [THRIFT-2127] - Autoconf scripting does not properly account for cross-compile + * [THRIFT-2180] - Integer types issues in Cocoa lib on ARM64 + * [THRIFT-2189] - Go needs "isset" to fully support "union" type (and optionals) + * [THRIFT-2192] - autotools on Redhat based systems + * [THRIFT-2546] - cross language tests fail at 'TestMultiException' when using nodejs server + * [THRIFT-2547] - nodejs servers and clients fail to connect with cpp using compact protocol + * [THRIFT-2548] - Nodejs servers and clients do not work properly with -ssl + * [THRIFT-1471] - toString() does not print ByteBuffer values when nested in a List + * [THRIFT-1201] - getaddrinfo resource leak + * [THRIFT-615] - TThreadPoolServer doesn't call task_done after pulling tasks from its clients queue + * [THRIFT-162] - Thrift structures are unhashable, preventing them from being used as set elements + * [THRIFT-810] - Crashed client on TSocket::close under loads + * [THRIFT-557] - charset problem with file Autogenerated by Thrift + * [THRIFT-233] - IDL doesn't support negative hex literals + * [THRIFT-1649] - contrib/zeromq does not build in 0.8.0 + * [THRIFT-1642] - Miscalculation leads to throwing an unexpected "TTransportException::TIMED_OUT" (also called "EAGAIN (timed out)") exception + * [THRIFT-1587] - TSocket::setRecvTimeout error + * [THRIFT-1248] - pointer subtraction in TMemoryBuffer relies on undefined behavior + * [THRIFT-1774] - Sasl Transport client would hang when trying to connect to a non-sasl transport
server + * [THRIFT-1754] - RangeError in buffer handling + * [THRIFT-1618] - static structMap in FieldMetaData is not thread safe and can lead to deadlocks + * [THRIFT-2335] - thrift incompatibility with py:tornado as server, java as client + * [THRIFT-2803] - TCP_DEFER_ACCEPT not supported with domain sockets + * [THRIFT-2799] - Build Problem(s): ld: library not found for -l:libboost_unit_test_framework.a + * [THRIFT-2801] - C++ test suite compilation warnings + * [THRIFT-2802] - C++ tutorial compilation warnings + * [THRIFT-2795] - thrift_binary_protocol.c: 'dereferencing type-punned pointer will break strict-aliasing rules' + * [THRIFT-2817] - TSimpleJSONProtocol reads beyond end of message + * [THRIFT-2826] - html:standalone sometimes ignored + * [THRIFT-2829] - Support haxelib installation via github + * [THRIFT-2828] - slightly wrong help screen indent + * [THRIFT-2831] - Removes dead code in web_server.js introduced in THRIFT-2819 + * [THRIFT-2823] - All JS-tests are failing when run with grunt test + * [THRIFT-2827] - Thrift 0.9.2 fails to compile on Yosemite due to tr1/functional include in ProcessorTest.cpp + * [THRIFT-2843] - Automake configure.ac has possible typo related to Java + * [THRIFT-2813] - multiple haxe library fixes/improvements + * [THRIFT-2825] - Supplying unicode to python Thrift client can cause next request arguments to get overwritten + * [THRIFT-2840] - Cabal file points to LICENSE file outside the path of the Haskell project. + * [THRIFT-2818] - Trailing commas in array + * [THRIFT-2830] - Clean up ant warnings in tutorial dir + * [THRIFT-2842] - Erlang thrift client has infinite timeout + * [THRIFT-2810] - Do not leave the underlying ServerSocket open if construction of TServerSocket fails + * [THRIFT-2812] - Go server adding redundant buffering layer + * [THRIFT-2839] - TFramedTransport read bug + * [THRIFT-2844] - Nodejs support broken when running under Browserify + * [THRIFT-2814] - args/result classes not found when no namespace is set + * [THRIFT-2847] - function IfValue() is a duplicate of System.StrUtils.IfThen + * [THRIFT-2848] - certain Delphi tests do not build if TypeRegistry is used + * [THRIFT-2854] - Go Struct writer and reader lose important error information + * [THRIFT-2858] - Enable header field case insensitive match in THttpServer + * [THRIFT-2857] - C# generator creates uncompilable code for struct constants + * [THRIFT-2860] - Delphi server closes connection on unexpected exceptions + * [THRIFT-2868] - Enhance error handling in the Go client + * [THRIFT-2879] - TMemoryBuffer: using lua string in wrong way + * [THRIFT-2851] - Remove strange public Peek() from Go transports + * [THRIFT-2852] - Better Open/IsOpen/Close behavior for StreamTransport.
+ * [THRIFT-2871] - Missing semicolon in thrift.js + * [THRIFT-2872] - ThreadManager deadlock for task expiration + * [THRIFT-2881] - Handle errors from Accept() correctly + * [THRIFT-2849] - Spelling errors reported by codespell tool + * [THRIFT-2870] - C++ TJSONProtocol using locale dependent formatting + * [THRIFT-2882] - Lua Generator: using string.len function to get struct(map,list,set) size + * [THRIFT-2864] - JSON generator missing from Visual Studio build project + * [THRIFT-2878] - Go validation support of required fields + * [THRIFT-2873] - TPipe and TPipeServer don't compile on Windows with UNICODE enabled + * [THRIFT-2888] - import is missing in JSON generator + * [THRIFT-2900] - Python THttpClient does not reset socket timeout on exception + * [THRIFT-2907] - 'ntohll' macro redefined + * [THRIFT-2884] - Map does not serialize correctly for JSON protocol in Go library + * [THRIFT-2887] - --with-openssl configure flag is ignored + * [THRIFT-2894] - PHP json serializer skips maps with int/bool keys + * [THRIFT-2904] - json_protocol_test.go fails + * [THRIFT-2906] - library not found for -l:libboost_unit_test_framework.a + * [THRIFT-2890] - binary data may lose bytes with JSON transport under specific circumstances + * [THRIFT-2891] - binary data may cause a failure with JSON transport under specific circumstances + * [THRIFT-2901] - Fix for generated TypeScript functions + indentation of JavaScript maps + * [THRIFT-2916] - make check fails for D language + * [THRIFT-2918] - Race condition in Python TProcessPoolServer test + * [THRIFT-2920] - Erlang Thrift test uses wrong IDL file + * [THRIFT-2922] - $TRIAL is used with Python tests but not tested accordingly + * [THRIFT-2912] - Autotool build for C++ Qt library is invalid + * [THRIFT-2914] - explicit dependency on Lua5.2 fails on some systems + * [THRIFT-2910] - libevent is not really optional + * [THRIFT-2911] - fix c++ version of zeromq transport, the old version cannot work + * [THRIFT-2915] - Lua generator missing from Visual Studio build project + * [THRIFT-2917] - "make clean" breaks test/c_glib + * [THRIFT-2919] - Haxe test server timeout too large + * [THRIFT-2923] - JavaScript client assumes a message being written + * [THRIFT-2924] - TNonblockingServer crashes when user-provided event_base is used + * [THRIFT-2925] - CMake build does not work with OpenSSL or anything installed in a non-system location + * [THRIFT-2931] - Access to undeclared static property: Thrift\Protocol\TProtocol::$TBINARYPROTOCOLACCELERATED + * [THRIFT-2893] - CMake build fails with boost thread or std thread + * [THRIFT-2902] - Generated c_glib code does not compile with clang + * [THRIFT-2903] - Qt4 library built with CMake does not work + * [THRIFT-2942] - CSharp generates invalid code for properties named read or write + * [THRIFT-2932] - Node.js Thrift connection libraries throw Exceptions into event emitter + * [THRIFT-2933] - v0.9.2: doubles encoded in node with compact protocol cannot be decoded by python + * [THRIFT-2934] - createServer signature mismatch + * [THRIFT-2981] - IDL with no namespace produces unparsable PHP + * [THRIFT-2999] - Addition of .gitattributes text auto in THRIFT-2724 causes modified files on checkout + * [THRIFT-2949] - typo in compiler/cpp/README.md + * [THRIFT-2957] - warning: source file %s is in a subdirectory, but option 'subdir-objects' is disabled + * [THRIFT-2953] - TNamedPipeServerTransport is not Stop()able + * [THRIFT-2962] - Docker Thrift env for development and testing + * [THRIFT-2971] - C++ test and tutorial
parallel build is unstable + * [THRIFT-2972] - Missing backslash in lib/cpp/test/Makefile.am + * [THRIFT-2951] - Fix Erlang name conflict test + * [THRIFT-2955] - Using list of typedefs does not compile on Go + * [THRIFT-2960] - namespace regression for Ruby + * [THRIFT-2959] - nodejs: fix binary unit tests + * [THRIFT-2966] - nodejs: Fix bad references to TProtocolException and TProtocolExceptionType + * [THRIFT-2970] - grunt-jsdoc fails due to dependency issues + * [THRIFT-3001] - C# Equals fails for binary fields (byte[]) + * [THRIFT-3003] - Missing LICENSE file prevents package from being installed + * [THRIFT-3008] - Node.js server does not fully support exceptions + * [THRIFT-3007] - Travis build is broken because of directory conflict + * [THRIFT-3009] - TSSLSocket does not use the correct hostname (breaks certificate checks) + * [THRIFT-3011] - C# test server testException() not implemented according to specs + * [THRIFT-3012] - Timing problems in NamedPipe implementation due to unnecessary open/close + * [THRIFT-3019] - Golang generator missing docstring for structs + * [THRIFT-3021] - Service remote tool does not import stub package with package prefix + * [THRIFT-3026] - TMultiplexedProcessor does not have a constructor + * [THRIFT-3028] - Regression caused by THRIFT-2180 + * [THRIFT-3017] - order of map key/value types incorrect for one CTOR + * [THRIFT-3020] - Cannot compile thrift as C++03 + * [THRIFT-3024] - User-Agent "BattleNet" used in some Thrift library files + * [THRIFT-3047] - Uneven calls to indent_up and indent_down in Cocoa generator + * [THRIFT-3048] - NodeJS decoding of I64 is inconsistent across protocols + * [THRIFT-3043] - go compiler generator uses non C++98 code + * [THRIFT-3044] - Docker README.md paths to Dockerfiles are incorrect + * [THRIFT-3040] - bower.json wrong "main" path + * [THRIFT-3051] - Go Thrift generator creates bad go code + * [THRIFT-3057] - Java compiler build is broken + * [THRIFT-3061] - C++ TSSLSocket shutdown delay/vulnerability + * [THRIFT-3062] - C++ TServerSocket invalid port number (over 999999) causes stack corruption + * [THRIFT-3065] - Update libthrift dependencies (slf4j, httpcore, httpclient) + * [THRIFT-3244] - TypeScript: fix namespace of included types + * [THRIFT-3246] - Reduce the number of trivial warnings in Windows C++ CMake builds + * [THRIFT-3224] - Fix TNamedPipeServer unpredictable behavior on accept + * [THRIFT-3230] - Python compiler generates wrong code if there is a function throwing a typedef of an exception with another namespace + * [THRIFT-3236] - MaxSkipDepth never checked + * [THRIFT-3239] - Limit recursion depth + * [THRIFT-3241] - fatal error: runtime: cannot map pages in arena address space + * [THRIFT-3242] - OSGi Import-Package directive is missing the Apache HTTP packages + * [THRIFT-3234] - Limit recursion depth + * [THRIFT-3222] - TypeScript: Generated Enums are quoted + * [THRIFT-3229] - unexpected Timeout exception when desired bytes are only partially available + * [THRIFT-3231] - CPP: Limit recursion depth to 64 + * [THRIFT-3235] - Limit recursion depth + * [THRIFT-3175] - fastbinary.c python deserialize can cause huge allocations from garbage + * [THRIFT-3176] - Union incorrectly implements == + * [THRIFT-3177] - Fails to run rake test + * [THRIFT-3180] - lua plugin: framed transport does not work + * [THRIFT-3179] - lua plugin can't connect to remote server because function l_socket_create_and_connect always binds socket to localhost + * [THRIFT-3248] - TypeScript: additional comma in method signature
without parameters + * [THRIFT-3302] - Go JSON protocol should encode Thrift byte type as signed integer string + * [THRIFT-3297] - c_glib: an abstract base class is not generated + * [THRIFT-3294] - TZlibTransport for Java does not write data correctly + * [THRIFT-3296] - Go cross test does not conform to spec + * [THRIFT-3295] - C# library does not build on Mono 4.0.2.5 or later + * [THRIFT-3293] - JavaScript: null values turn into empty structs in constructor + * [THRIFT-3310] - lib/erl/README.md has incorrect formatting + * [THRIFT-3319] - CSharp tutorial will not build using the *.sln + * [THRIFT-3335] - Ruby server does not handle processor exception + * [THRIFT-3338] - Stray underscore in generated go when service name starts with "New" + * [THRIFT-3324] - Update Go Docs for pulling all packages + * [THRIFT-3345] - Clients blocked indefinitely when a java.lang.Error is thrown + * [THRIFT-3332] - make dist fails on clean build + * [THRIFT-3326] - Tests do not compile under *BSD + * [THRIFT-3334] - Markdown notation of protocol spec is malformed + * [THRIFT-3331] - warning: ‘etype’ may be used uninitialized in this function + * [THRIFT-3349] - Python server does not handle processor exception + * [THRIFT-3343] - Fix haskell README + * [THRIFT-3340] - Python: enable json tests again + * [THRIFT-3311] - Top level README.md has incorrect formatting + * [THRIFT-2936] - Minor memory leak in SSL + * [THRIFT-3290] - Using from in variable names causes the generated Python code to have errors + * [THRIFT-3225] - Fix TPipeServer unpredictable behavior on interrupt() + * [THRIFT-3354] - Fix word-extraction substr bug in initialism code + * [THRIFT-2006] - TBinaryProtocol message header call name length is not validated and can be used to core the server + * [THRIFT-3329] - C++ library unit tests don't compile against the new boost-1.59 unit test framework + * [THRIFT-2630] - windows7 64bit pc: ipv4 and ipv6 can't be used + * [THRIFT-3336] - Thrift generated streaming operators added in 0.9.2 cannot be overridden + * [THRIFT-2681] - Core of unwind_cleanup + * [THRIFT-3317] - cpp namespace org.apache issue appears in 0.9 + +## Documentation + * [THRIFT-3286] - Apache Ant is a necessary dependency + +## Improvement + * [THRIFT-227] - Byte[] in collections aren't pretty printed like regular binary fields + * [THRIFT-2744] - Vagrantfile for Centos 6.5 + * [THRIFT-2644] - Haxe support + * [THRIFT-2756] - register Media Type @ IANA + * [THRIFT-3076] - Compatibility with Haxe 3.2.0 + * [THRIFT-3081] - C++ Consolidate client processing loops in TServers + * [THRIFT-3083] - C++ Consolidate server processing loops in TSimpleServer, TThreadedServer, TThreadPoolServer + * [THRIFT-3084] - C++ add concurrent client limit to threaded servers + * [THRIFT-3074] - Add compiler/cpp/lex.yythriftl.cc to gitignore.
+ * [THRIFT-3134] - Remove use of deprecated "phantom.args" + * [THRIFT-3133] - Allow "make cross" and "make precross" to run without building all languages + * [THRIFT-3142] - Make JavaScript use downloaded libraries + * [THRIFT-3141] - Improve logging of JavaScript test + * [THRIFT-3144] - Proposal: make String representation of enums in generated go code less verbose + * [THRIFT-3130] - Remove the last vestiges of THRIFT_OVERLOAD_IF from THRIFT-1316 + * [THRIFT-3131] - Consolidate suggested import path for go thrift library to git.apache.org in docs and code + * [THRIFT-3092] - Generated Haskell types should derive Generic + * [THRIFT-3110] - Print error log after cross test failures on Travis + * [THRIFT-3114] - Using local temp variables to not pollute the global table + * [THRIFT-3106] - CMake summary should give more information about why a library is set to off + * [THRIFT-3119] - Java's TThreadedSelectorServer has indistinguishable log messages in run() + * [THRIFT-3122] - Javascript struct constructor should properly initialize struct and container members from plain js arguments + * [THRIFT-3151] - Fix links to git-wip* - should be git.apache.org + * [THRIFT-3167] - Windows build from source instructions need to be revised + * [THRIFT-3155] - move contrib/mingw32-toolchain.cmake to build/cmake/ + * [THRIFT-3160] - Make generated go enums implement TextMarshaller and TextUnmarshaller interfaces + * [THRIFT-3150] - Add an option to thrift go generator to make Read and Write methods private + * [THRIFT-3149] - Make ReadFieldN methods in generated Go code private + * [THRIFT-3172] - Add tutorial to Thrift web site + * [THRIFT-3214] - Add Erlang option for using maps instead of dicts + * [THRIFT-3201] - Capture github test artifacts for failed builds + * [THRIFT-3266] - c_glib: Multiple compiler warnings building unit tests + * [THRIFT-3285] - c_glib: Build library with all warnings enabled, no warnings generated + * [THRIFT-1954] - Allow for a separate connection timeout value + * [THRIFT-2098] - Add support for Qt5+ + * [THRIFT-2199] - Remove Dense protocol (was: move to Contrib) + * [THRIFT-406] - C++ Test suite cleanup + * [THRIFT-902] - socket and connect timeout in TSocket should be distinguished + * [THRIFT-388] - Use a separate wire format for async calls + * [THRIFT-727] - support native C++ language specific exception message + * [THRIFT-1784] - pep-3110 compliance for exception handling + * [THRIFT-1025] - C++ ServerSocket should inherit from Socket with the necessary Ctor to listen on connections from a specific host + * [THRIFT-2269] - Can deploy libthrift-source.jar to maven central repository + * [THRIFT-2804] - Pull an interface out of TBaseAsyncProcessor + * [THRIFT-2806] - more whitespace fixups + * [THRIFT-2811] - Make remote socket address accessible + * [THRIFT-2809] - .gitignore update for compiler's visual project + * [THRIFT-2846] - Expose ciphers parameter from ssl.wrap_socket() + * [THRIFT-2859] - JSON generator: output complete descriptors + * [THRIFT-2861] - add buffered transport + * [THRIFT-2865] - Test case for Go: SeqId out of sequence + * [THRIFT-2866] - Go generator source code is hard to read and maintain + * [THRIFT-2880] - Read the network address from the listener if available. + * [THRIFT-2875] - Typo in TDenseProtocol.h comment + * [THRIFT-2874] - TBinaryProtocol member variable "string_buf_" is never used.
+ * [THRIFT-2855] - Move contributing.md to the root of the repository + * [THRIFT-2862] - Enable RTTI and/or build macros for generated code + * [THRIFT-2876] - Add test for THRIFT-2526 Assignment operators and copy constructors in c++ don't copy the __isset struct + * [THRIFT-2897] - Generate -isEqual: and -hash methods + * [THRIFT-2909] - Improve travis build + * [THRIFT-2921] - Make Erlang impl ready for OTP 18 release (dict/0 and set/0 are deprecated) + * [THRIFT-2928] - Rename the erlang test_server module + * [THRIFT-2940] - Allow installing Thrift from git as NPM module by providing package.json in top level directory + * [THRIFT-2937] - Allow setting a maximum frame size in TFramedTransport + * [THRIFT-2976] - nodejs: xhr and websocket support for browserify + * [THRIFT-2996] - Test for Haxe 3.1.3 or better + * [THRIFT-2969] - nodejs: DRY up library tests + * [THRIFT-2973] - Update Haxe lib readme regarding Haxe 3.1.3 + * [THRIFT-2952] - Improve handling of Server.Stop() + * [THRIFT-2964] - nodejs: move protocols and transports into separate files + * [THRIFT-2963] - nodejs - add test coverage + * [THRIFT-3006] - Attach 'omitempty' json tag for optional fields in Go + * [THRIFT-3027] - Go compiler does not ensure common initialisms have consistent case + * [THRIFT-3030] - TThreadedServer: Property for number of clientThreads + * [THRIFT-3023] - Go compiler is a little overly conservative with names of attributes + * [THRIFT-3018] - Compact protocol for Delphi + * [THRIFT-3025] - Change pure Int constants into @enums (where possible) + * [THRIFT-3031] - migrate "shouldStop" flag to TServer + * [THRIFT-3022] - Compact protocol for Haxe + * [THRIFT-3041] - Generate asynchronous clients for Cocoa + * [THRIFT-3053] - Perl SSL Socket Support (Encryption) + * [THRIFT-3247] - Generate a C++ thread-safe client + * [THRIFT-3217] - Provide a little endian variant of the binary protocol in C++ + * [THRIFT-3223] - TypeScript: Add initial support for Enum Maps + * [THRIFT-3220] - Option to suppress @Generated Annotation entirely + * [THRIFT-3300] - Reimplement TZlibTransport in Java using streams + * [THRIFT-3288] - c_glib: Build unit tests with all warnings enabled, no warnings generated + * [THRIFT-3347] - Improve cross test servers and clients + * [THRIFT-3342] - Improve ruby cross test client and server compatibility + * [THRIFT-2296] - Add C++ Base class for service + * [THRIFT-3337] - Add testBool method to cross tests + * [THRIFT-3303] - Disable concurrent cabal jobs on Travis to avoid GHC crash + * [THRIFT-2623] - Docker container for Thrift + * [THRIFT-3298] - thrift endian converters may conflict with other libraries + * [THRIFT-1559] - Provide memory pool for TBinaryProtocol to eliminate memory fragmentation + * [THRIFT-424] - Steal ProtocolBuffers' VarInt implementation for C++ + +## New Feature + * [THRIFT-3070] - Add ability to set the LocalCertificateSelectionCallback + * [THRIFT-1909] - Java: Add compiler flag to use the "option pattern" for optional fields + * [THRIFT-2099] - Stop TThreadPoolServer with alive connections. 
+ * [THRIFT-123] - implement TZlibTransport in Java + * [THRIFT-2368] - New option: reuse-objects for Java generator + * [THRIFT-2836] - Optionally generate C++11 MoveConstructible types + * [THRIFT-2824] - Flag to disable html escaping doctext + * [THRIFT-2819] - Add WebSocket client to node.js + * [THRIFT-3050] - Client certificate authentication for non-http TLS in C# + * [THRIFT-3292] - Implement TZlibTransport in Go + +## Question + * [THRIFT-2583] - Thrift on xPC target (SpeedGoat) + * [THRIFT-2592] - thrift server using c_glib + * [THRIFT-2832] - c_glib: Handle string lists correctly + * [THRIFT-3136] - thrift installation problem on mac + * [THRIFT-3346] - c_glib: Tutorials example crashes saying Calculator.ping implementation returned FALSE but did not set an error + +## Sub-task + * [THRIFT-2578] - Moving 'make cross' from test.sh to test.py + * [THRIFT-2734] - Go coding standards + * [THRIFT-2748] - Add Vagrantfile for Centos 6.5 + * [THRIFT-2753] - Misc. Haxe improvements + * [THRIFT-2640] - Compact Protocol in Cocoa + * [THRIFT-3262] - warning: overflow in implicit constant conversion in DenseProtoTest.cpp + * [THRIFT-3194] - Can't build with go enabled. gomock SCC path incorrect. + * [THRIFT-3275] - c_glib tutorial warnings in generated code + * [THRIFT-1125] - Multiplexing support for the Ruby Library + * [THRIFT-2807] - PHP Code Style + * [THRIFT-2841] - Add comprehensive integration tests for the whole Go stack + * [THRIFT-2815] - Haxe: Support for Multiplexing Services on any Transport, Protocol and Server + * [THRIFT-2886] - Integrate binary type in standard Thrift cross test + * [THRIFT-2946] - Enhance usability of cross test framework + * [THRIFT-2967] - Add .editorconfig to root + * [THRIFT-3033] - Perl: Support for Multiplexing Services on any Transport, Protocol and Server + * [THRIFT-3174] - Initialism code in the Go compiler doesn't check first word + * [THRIFT-3193] - Option to suppress date value in @Generated annotation + * [THRIFT-3305] - Missing dist files for 0.9.3 release candidate + * [THRIFT-3341] - Add testBool methods + * [THRIFT-3308] - Fix broken test cases for 0.9.3 release candidate + +## Task + * [THRIFT-2834] - Remove semi-colons from python code generator + * [THRIFT-2853] - Adjust comments not applying anymore after THRIFT-2852 + +## Test + * [THRIFT-3211] - Add make cross support for php TCompactProtocol + +## Wish + * [THRIFT-2838] - TNonblockingServer can bind to port 0 (i.e., get an OS-assigned port) but there is no way to get the port number + + + +Thrift 0.9.2 +-------------------------------------------------------------------------------- +## Bug + * [THRIFT-2793] - Go compiler produces uncompilable code + * [THRIFT-1481] - Unix domain sockets in C++ do not support the abstract namespace + * [THRIFT-1455] - TBinaryProtocolT::writeString casts from size_t to uint32_t, which is not safe on 64-bit platforms + * [THRIFT-1579] - PHP Extension - function thrift_protocol_read_binary not working from TBinarySerializer::deserialize + * [THRIFT-1584] - Error: could not SetMinThreads in ThreadPool on single-core machines + * [THRIFT-1614] - Thrift build from svn repo sources fails with automake-1.12 + * [THRIFT-1047] - rb_thrift_memory_buffer_write treats arg as string without check, segfaults if you pass non-string + * [THRIFT-1639] - Java/Python: Serialization/Deserialization of double type using CompactProtocol + * [THRIFT-1647] - NodeJS BufferedTransport does not work beyond the hello-world example + * [THRIFT-2130] - Thrift's D library/test:
parts of "make check" code do not compile with recent dmd-2.062 through dmd-2.064alpha + * [THRIFT-2140] - Error compiling cpp tutorials + * [THRIFT-2139] - MSVC 2012 Error - Cannot compile due to BoostThreadFactory + * [THRIFT-2138] - pkgconfig file created with wrong include path + * [THRIFT-2160] - Warning in thrift.h when compiling with -Wunused and NDEBUG + * [THRIFT-2158] - Compact, JSON, and SimpleJSON protocols are not working correctly + * [THRIFT-2167] - nodejs lib throws error if options argument isn't passed + * [THRIFT-2288] - Go impl of Thrift JSON protocol wrongly writes/expects true/false for bools + * [THRIFT-2147] - Thrift IDL grammar allows for dotted identifier names + * [THRIFT-2145] - Rack and Thin are not just development dependencies + * [THRIFT-2267] - Should be able to choose socket family in Python TSocket + * [THRIFT-2276] - java path in spec file needs updating + * [THRIFT-2281] - Generated send/recv code ignores errors returned by the underlying protocol + * [THRIFT-2280] - TJSONProtocol.Flush() does not really flush the transport + * [THRIFT-2274] - TNonblockingServer and TThreadedSelectorServer do not close their channel selectors on exit and leak file descriptors + * [THRIFT-2265] - php library doesn't build + * [THRIFT-2232] - IsSet* broken in Go + * [THRIFT-2246] - Unset enum value is printed by ToString() + * [THRIFT-2240] - thrift.vim (contrib) does not correctly handle 'union' + * [THRIFT-2243] - TNonblockingServer in thrift crashes when TFramedTransport opens + * [THRIFT-2230] - Cannot Build on RHEL/Centos/Amazon Linux 6.x + * [THRIFT-2247] - Go generator doesn't deal well with map keys of type binary + * [THRIFT-2253] - Python Tornado TTornadoServer base class change + * [THRIFT-2261] - java: error: unmappable character for encoding ASCII + * [THRIFT-2259] - C#: unexpected null logDelegate() pointer causes AV in TServer.serve() + * [THRIFT-2225] - SSLContext destroy before cleanupOpenSSL + * [THRIFT-2224] - TSSLSocket.h and TSSLServerSocket.h should use the platform socket too + * [THRIFT-2229] - thrift failed to build on OSX 10.9 GM + * [THRIFT-2227] - Thrift compiler generates spurious warnings with Xlint + * [THRIFT-2219] - Thrift gem fails to build on OS X Mavericks with 1.9.3 rubies + * [THRIFT-2226] - TServerSocket - keepAlive wrong initialization order + * [THRIFT-2285] - TJsonProtocol implementation for Java doesn't allow a slash (/) to be escaped (\/) + * [THRIFT-2216] - Extraneous semicolon in TProtocolUtil.h makes clang mad + * [THRIFT-2215] - Generated HTML/Graphviz lists referenced enum identifiers as UNKNOWN. + * [THRIFT-2211] - Exception constructor does not contain namespace prefix.
+ * [THRIFT-2210] - lib/java TSimpleJSONProtocol can emit invalid JSON + * [THRIFT-2209] - Ruby generator -- please namespace classes + * [THRIFT-2202] - Delphi TServerImpl.DefaultLogDelegate may stop the server with I/O-Error 105 + * [THRIFT-2201] - Ternary operator returns different types (build error for some compilers) + * [THRIFT-2200] - nested structs cause generate_fingerprint() to slow down at excessive CPU load + * [THRIFT-2197] - fix jar output directory in rpm spec file + * [THRIFT-2196] - Fix invalid dependency in Makefile.am + * [THRIFT-2194] - Node: Not actually prepending residual data in TFramedTransport.receiver + * [THRIFT-2193] - Java code generator emits spurious semicolon when deep copying binary data + * [THRIFT-2191] - Fix csharp JSONProtocol.ReadJSONDouble (specify InvariantCulture) + * [THRIFT-2214] - System header sys/param.h is included inside the Thrift namespace + * [THRIFT-2178] - Thrift generator returns error exit code on --version + * [THRIFT-2171] - NodeJS implementation has extremely low test coverage + * [THRIFT-2183] - gem install fails on zsh + * [THRIFT-2182] - segfault in regression tests (GC bug in rb_thrift_memory_buffer_write) + * [THRIFT-2181] - oneway calls don't work in NodeJS + * [THRIFT-2169] - JavaME Thrift Library causes "java.io.IOException: No Response Entries Available" after using the Thrift client for some time + * [THRIFT-2168] - Node.js appears broken (at least, examples don't work as intended) + * [THRIFT-2293] - TSSLTransportFactory.createSSLContext() leaves files open + * [THRIFT-2279] - TSerializer only returns the first 1024 bytes serialized + * [THRIFT-2278] - Buffered transport doesn't support writes > buffer size + * [THRIFT-2275] - Fix memory leak in golang compact_protocol. + * [THRIFT-2282] - Incorrect code generated for some typedefs + * [THRIFT-2009] - Go redeclaration error + * [THRIFT-1964] - 'Isset' causes problems with C#/.NET serializers + * [THRIFT-2026] - Fix TCompactProtocol 64 bit builds + * [THRIFT-2108] - Fix TAsyncClientManager timeout race + * [THRIFT-2068] - Multiple calls from same connection are not processed in node + * [THRIFT-1750] - Make compiler build cleanly under visual studio 10 + * [THRIFT-1755] - Comment parsing bug + * [THRIFT-1771] - "make check" fails on x64 for libboost_unit_test_framework.a + * [THRIFT-1841] - NodeJS Thrift incorrectly parses non-UTF8-string types + * [THRIFT-1908] - Using php thrift_protocol accelerated transfer causes core dump + * [THRIFT-1892] - Socket timeouts are declared in milliseconds, but are actually set in microseconds + * [THRIFT-2303] - TBufferedTransport not properly closing underlying transport + * [THRIFT-2313] - nodejs server crash after processing the first request when using MultiplexedProcessor/FramedBuffer/BinaryProtocol + * [THRIFT-2311] - Go: invalid code generated when exception name is a go keyword + * [THRIFT-2308] - node: TJSONProtocol parse error when reading from buffered message + * [THRIFT-2316] - cpp: TFileTransportTest + * [THRIFT-2352] - msvc failed to compile thrift tests + * [THRIFT-2337] - Golang does not report TIMED_OUT exceptions + * [THRIFT-2340] - Generated server implementation does not send response type EXCEPTION on the Thrift.TApplicationExceptionType.UNKNOWN_METHOD exception + * [THRIFT-2354] - Connection errors can lead to case_clause exceptions + * [THRIFT-2339] - Uncaught exception in thrift c# driver + * [THRIFT-2356] - c++ thrift client not working with ssl (SSL_connect hangs) + * [THRIFT-2331] - Missing call to
ReadStructBegin() in TApplicationException.Read() + * [THRIFT-2323] - Uncompileable Delphi code generated for typedef'd structs + * [THRIFT-2322] - Correctly show the number of times ExecutorService (java) has rejected the client. + * [THRIFT-2389] - namespaces handled wrongly in acrionscript 3.0 implementation + * [THRIFT-2388] - GoLang - Fix data races in simple_server and server_socket + * [THRIFT-2386] - Thrift refuses to link yylex + * [THRIFT-2375] - Excessive
<br>'s in generated HTML + * [THRIFT-2373] - warning CS0414 in THttpClient.cs: private field 'Thrift.Transport.THttpClient.connection' assigned but never used + * [THRIFT-2372] - thrift/json_protocol.go:160: function ends without a return statement + * [THRIFT-2371] - ruby bundler version fails on ~1.3.1, remove and take latest avail + * [THRIFT-2370] - Compiler SEGFAULTs generating HTML documentation for complex structure + * [THRIFT-2384] - Binary map keys produce uncompilable code in go + * [THRIFT-2380] - unreachable code (CID 1174546, CID 1174679) + * [THRIFT-2378] - service method arguments of binary type lead to uncompileable Go code + * [THRIFT-2363] - Issue with character encoding of Success returned from Login using Thrift Proxy and NodeJS + * [THRIFT-2359] - TBufferedTransport doesn't clear its buffer on a failed flush call + * [THRIFT-2428] - Python 3 setup.py support + * [THRIFT-2367] - Build failure: stdlib and boost both define uint64_t + * [THRIFT-2365] - C# decodes too many binary bytes from JSON + * [THRIFT-2402] - byte count of FrameBuffer in AWAITING_CLOSE state is not subtracted from readBufferBytesAllocated + * [THRIFT-2396] - Build Error on MacOSX + * [THRIFT-2395] - thrift Ruby gem requires development dependency 'thin' regardless of environment + * [THRIFT-2414] - c_glib fix several bugs. + * [THRIFT-2420] - Go argument parser for methods without arguments does not skip fields + * [THRIFT-2439] - Bug in TProtocolDecorator Class causes parsing errors + * [THRIFT-2419] - golang - Fix fmt.Errorf in generated code + * [THRIFT-2418] - Go handler function panics on internal error + * [THRIFT-2405] - Node.js Multiplexer tests fail (silently) + * [THRIFT-2581] - TFDTransport destructor should not throw + * [THRIFT-2575] - Thrift includes siginfo_t within apache::thrift::protocol namespace + * [THRIFT-2577] - TFileTransport misuse of closesocket on windows platform + * [THRIFT-2576] - Implement Thrift.Protocol.prototype.skip method in JavaScript library + * [THRIFT-2588] - Thrift compiler is not buildable in Visual Studio 2010 + * [THRIFT-2594] - JS Compiler: Single quotes are not being escaped in constants. + * [THRIFT-2591] - TFramedTransport does not handle payloads split across packets correctly + * [THRIFT-2599] - Uncompileable Delphi code due to naming conflicts with IDL + * [THRIFT-2590] - C++ Visual Studio solution doesn't include Multiplexing support + * [THRIFT-2595] - Node.js: Fix global leaks and copy-paste errors + * [THRIFT-2565] - autoconf fails to find mingw-g++ cross compiler on travis CI + * [THRIFT-2555] - excessive "unused field" comments + * [THRIFT-2554] - double initialization in generated Read() method + * [THRIFT-2551] - OutOfMemoryError "unable to create new native thread" kills serve thread + * [THRIFT-2543] - Generated enum type in haskell should be qualified + * [THRIFT-2560] - Thrift compiler generator tries to concat ints with strings using + + * [THRIFT-2559] - Centos 6.5 unable to "make" with Thrift 0.9.1 + * [THRIFT-2526] - Assignment operators and copy constructors in c++ don't copy the __isset struct + * [THRIFT-2454] - c_glib: There is no gethostbyname_r() in some OS. + * [THRIFT-2451] - Do not use pointers for optional fields with defaults. Do not write such fields if its value is set to default. Also, do not use pointers for any optional fields mapped to go map or slice.
generate Get accessors + * [THRIFT-2450] - include HowToContribute in the src repo + * [THRIFT-2448] - thrift/test/test.sh has incorrect Node.js test path + * [THRIFT-2460] - unopened socket fd must be less than zero. + * [THRIFT-2459] - --version should not exit 1 + * [THRIFT-2468] - Timestamp handling + * [THRIFT-2467] - Unable to build contrib/fb303 on OSX 10.9.2 + * [THRIFT-2466] - Improper error handling for SSL/TLS connections that don't complete a handshake + * [THRIFT-2463] - test/py/RunClientServer.py fails sometimes + * [THRIFT-2458] - Generated golang server code for "oneway" methods is incorrect + * [THRIFT-2456] - THttpClient fails when using async support outside Silverlight + * [THRIFT-2524] - Visual Studio project is missing TThreadedServer files + * [THRIFT-2523] - Visual Studio project is missing OverlappedSubmissionThread files + * [THRIFT-2520] - cpp:cob_style generates incorrect .tcc file + * [THRIFT-2508] - Uncompileable C# code due to language keywords in IDL + * [THRIFT-2506] - Update TProtocolException error codes to be used consistently throughout the library + * [THRIFT-2505] - go: struct should always be a pointer to avoid copying of potentially size-unbounded structs + * [THRIFT-2515] - TLS Method error during make + * [THRIFT-2503] - C++: Fix name collision when a struct has a member named "val" + * [THRIFT-2477] - thrift --help text with misplaced comma + * [THRIFT-2492] - test/cpp does not compile on mac + * [THRIFT-2500] - sending random data crashes thrift(golang) service + * [THRIFT-2475] - c_glib: buffered_transport_write function always returns TRUE. + * [THRIFT-2495] - JavaScript/Node string constants lack proper escaping + * [THRIFT-2491] - unable to import generated ThriftTest service + * [THRIFT-2490] - c_glib: if reading an exception from the server fails, the client may double free + * [THRIFT-2470] - THttpHandler swallows exceptions from processor + * [THRIFT-2533] - Boost version in requirements should be updated + * [THRIFT-2532] - Java version in installation requirements should be updated + * [THRIFT-2529] - TBufferedTransport split Tcp data bug in nodeJs + * [THRIFT-2537] - Path for "go get" does not work (pull request 115) + * [THRIFT-2443] - Node fails cross lang tests + * [THRIFT-2437] - Author fields in Python setup.py must be strings not lists.
+ * [THRIFT-2435] - Java compiler doesn't like struct member names that are identical to an existing enum or struct type + * [THRIFT-2434] - Missing namespace import for php TMultiplexedProcessor implementation + * [THRIFT-2432] - Flaky parallel build + * [THRIFT-2430] - Crash during TThreadPoolServer shutdown + * [THRIFT-667] - Period should not be allowed in identifier names + * [THRIFT-1212] - Members capital case conflict + * [THRIFT-2584] - Error handler not listened on javascript client + * [THRIFT-2294] - Incorrect Makefile generation + * [THRIFT-2601] - Fix vagrant to work for builds again + * [THRIFT-2092] - TNonblocking server should release handler as soon as connection closes + * [THRIFT-2557] - CS0542 member names cannot be the same as their enclosing type + * [THRIFT-2605] - TSocket warning on gcc 4.8.3 + * [THRIFT-2607] - ThreadManager.cpp warning on clang++ 3.4 + * [THRIFT-1998] - TCompactProtocol.tcc - one more warning on Visual 2010 + * [THRIFT-2610] - MSVC warning in TSocket.cpp + * [THRIFT-2614] - TNonblockingServer.cpp warnings on MSVC + * [THRIFT-2608] - TNonblockingServer.cpp warnings on clang 3.4 + * [THRIFT-2606] - ThreadManager.h warning in clang++ 3.4 + * [THRIFT-2609] - TFileTransport.h unused field warning (clang 3.4) + * [THRIFT-2416] - Cannot use TCompactProtocol with MSVC + * [THRIFT-1803] - Ruby Thrift 0.9.0 tries to encode UUID to UTF8 and crashes + * [THRIFT-2385] - Problem with gethostbyname2 during make check + * [THRIFT-2262] - thrift server 'MutateRow' operation gives no indication of success / failure + * [THRIFT-2048] - Prefer boolean context to nullptr_t conversion + * [THRIFT-2528] - Thrift Erlang Library: Multiple thrift applications in one bundle + * [THRIFT-1999] - warning on gcc 4.7 while compiling BoostMutex.cpp + * [THRIFT-2104] - Structs lose binary data when transferred from server to client in Java + * [THRIFT-2184] - undefined method rspec_verify for Thrift::MemoryBufferTransport + * [THRIFT-2351] - PHP TCompactProtocol fails to decode messages + * [THRIFT-2016] - Resource Leak in thrift struct under compiler/cpp/src/parse/t_function.h + * [THRIFT-2273] - Please delete old releases from mirroring system + * [THRIFT-2270] - Faulty library version numbering at build or documentation + * [THRIFT-2203] - Tests keep failing on Jenkins and Travis CI + * [THRIFT-2399] - thrift.el: recognize "//"-style comments in emacs thrift-mode + * [THRIFT-2582] - "FileTransport error" exception is raised when trying to use Java's TFileTransport + * [THRIFT-1682] - Multiple threads calling a Service function unsafely causes message corruption and terminates with Broken Pipe + * [THRIFT-2357] - recurse option has no effect when generating php + * [THRIFT-2248] - Go generator doesn't deal well with map keys of type binary + * [THRIFT-2426] - clarify IP rights and contributions from fbthrift + * [THRIFT-2041] - TNonblocking server compilation on windows (ARITHMETIC_RIGHT_SHIFT) + * [THRIFT-2400] - thrift.el: recognize "//"-style comments in emacs thrift-mode + * [THRIFT-1717] - Fix deb build in jenkins + * [THRIFT-2266] - ThreadManager.h:24:10: fatal error: 'tr1/functional' file not found on Mac 10.9 (Mavericks) + * [THRIFT-1300] - Test failures with parallel builds (make -j) + * [THRIFT-2487] - Tutorial requires two IDL files but only one is linked from the Thrift web site + * [THRIFT-2329] - missing release tags within git + * [THRIFT-2306] - concurrent client calls with nodejs + * [THRIFT-2222] - ruby gem cannot be compiled on OS X mavericks + *
[THRIFT-2381] - code generated by thrift2/hbase.thrift has compile errors + * [THRIFT-2390] - no close event when connection lost + * [THRIFT-2146] - Unable to pass multiple "--gen" options to the thrift compiler + * [THRIFT-2438] - Unexpected readFieldEnd call causes JSON Parsing errors + * [THRIFT-2498] - Error message "Invalid method name" while trying to call HBase Thrift API + * [THRIFT-841] - Build cruft + * [THRIFT-2570] - Wrong URL given in http://thrift.apache.org/developers + * [THRIFT-2604] - Fix debian packaging + * [THRIFT-2618] - Unignore /aclocal files required for build + * [THRIFT-2562] - ./configure creates Makefile in lib/d with errors + * [THRIFT-2593] - Unable to build thrift on ubuntu-12.04 (Precise) + * [THRIFT-2461] - Can't install thrift-0.8.0 on OS X 10.9.2 + * [THRIFT-2602] - Fix missing dist files + * [THRIFT-2620] - Fix python packaging + * [THRIFT-2545] - Test CPP fails to build (possibly typo) + +## Documentation + * [THRIFT-2155] - Adding one liner guide to rename the version.h.in and rename thrifty.cc.h + * [THRIFT-1991] - Add exceptions to examples + * [THRIFT-2334] - add a tutorial for node JS + * [THRIFT-2392] - Actionscript tutorial + * [THRIFT-2383] - contrib: sample for connecting Thrift with Rebus + * [THRIFT-2382] - contrib: sample for connecting Thrift with STOMP + +## Improvement + * [THRIFT-1457] - Capacity of TFramedTransport write buffer is never reset + * [THRIFT-1135] - Node.js tutorial + * [THRIFT-1371] - Socket timeouts (SO_RCVTIMEO and SO_SNDTIMEO) not supported on Solaris + * [THRIFT-2142] - Minor tweaks to thrift.el for better emacs package compatibility + * [THRIFT-2268] - Modify TSaslTransport to ignore TCP health checks from loadbalancers + * [THRIFT-2264] - GitHub page incorrectly states that Thrift is still incubating + * [THRIFT-2263] - Always generate good hashCode for Java + * [THRIFT-2233] - Java compiler should defensively copy its binary inputs + * [THRIFT-2239] - Address FindBugs errors + * [THRIFT-2249] - Add SMP Build option to thrift.spec (and three config defines) + * [THRIFT-2254] - Exceptions generated by Go compiler should implement error interface + * [THRIFT-2260] - Thrift imposes unneeded dependency on commons-lang3 + * [THRIFT-2258] - Add TLS v1.1/1.2 support to TSSLSocket.cpp + * [THRIFT-2205] - Node.js Test Server to support test.js JavaScript Browser test and sundry fixes + * [THRIFT-2204] - SSL client for the cocoa client + * [THRIFT-2172] - Java compiler allocates optionals array for every struct with an optional field + * [THRIFT-2185] - use cabal instead of runhaskell in haskell library + * [THRIFT-1926] - PHP Constant Generation Refactoring + * [THRIFT-2029] - Port C++ tests to Windows + * [THRIFT-2054] - TSimpleFileTransport - Java Lib has no straight forward TTransport based file transport + * [THRIFT-2040] - "uninitialized variable" warnings on MSVC/windows + * [THRIFT-2034] - Give developers' C++ code direct access to socket FDs on server side + * [THRIFT-2095] - Use print function for Python 3 compatibility + * [THRIFT-1868] - Make the TCP backlog configurable in the Java servers + * [THRIFT-1813] - Add @Generated annotation to generated classes + * [THRIFT-1815] - Code generators line buffer output + * [THRIFT-2305] - TFramedTransport empty constructor should probably be private + * [THRIFT-2304] - Move client assignments from constructor into method + * [THRIFT-2309] - Ruby (gem) & PHP RPM subpackages + * [THRIFT-2318] - perl: dependency Class::Accessor not checked + * [THRIFT-2317] - exclude tutorial
from build + * [THRIFT-2320] - Program level doctext does not get attached by parser + * [THRIFT-2349] - Golang - improve tutorial + * [THRIFT-2348] - PHP Generator: add array typehint to functions + * [THRIFT-2344] - configure.ac: compiler-only option + * [THRIFT-2343] - Golang - Return a single error for all exceptions instead of multiple return values + * [THRIFT-2341] - Enable generation of Delphi XMLDoc comments (a.k.a. "Help Insight") + * [THRIFT-2355] - Add SSL and Web Socket Support to Node and JavaScript + * [THRIFT-2350] - Add async calls to normal JavaScript + * [THRIFT-2330] - Generate PHPDoc comments + * [THRIFT-2332] - RPMBUILD: run bootstrap (if needed) + * [THRIFT-2391] - simple socket transport for actionscript 3.0 + * [THRIFT-2376] - nodejs: allow Promise style calls for client and server + * [THRIFT-2369] - Add ssl support for nodejs implementation + * [THRIFT-2401] - Haskell tutorial compiles + * [THRIFT-2417] - C# Union classes are not partial + * [THRIFT-2415] - Named pipes server performance & message mode + * [THRIFT-2404] - emit warning on (typically inefficient) list<byte> + * [THRIFT-2398] - Improve Node Server Library + * [THRIFT-2397] - Add CORS and CSP support for JavaScript and Node.js libraries + * [THRIFT-2407] - use markdown (rename README => README.md) + * [THRIFT-2300] - D configure info output should follow same format as other languages + * [THRIFT-2579] - Windows CE support + * [THRIFT-2574] - Compiler option to generate namespace directories for Ruby + * [THRIFT-2571] - Simplify cross compilation using CMake + * [THRIFT-2569] - Introduce file to specify third party library locations on Windows + * [THRIFT-2568] - Implement own certificate handler + * [THRIFT-2552] - eliminate warning from configure.ac + * [THRIFT-2549] - Generate json tag for struct members. use go.tag annotation to override the default generated tag. + * [THRIFT-2544] - Add support for socket transport for c# library when using Windows Phone projects + * [THRIFT-2453] - haskell tutorial: fix up division by 0 example + * [THRIFT-2449] - Enhance typedef structure to distinguish between forwards and real typedefs + * [THRIFT-2446] - There is no way to handle server stream errors + * [THRIFT-2455] - Allow client certificates to be used with THttpClient + * [THRIFT-2511] - Node.js needs the compact protocol + * [THRIFT-2493] - Node.js lib needs HTTP client + * [THRIFT-2502] - Optimize go implementations of binary and compact protocols for speed + * [THRIFT-2494] - Add enum toString helper function in c_glib + * [THRIFT-2471] - Make cpp.ref annotation language agnostic + * [THRIFT-2497] - server and client for test/go, also several fixes and improvements + * [THRIFT-2535] - TJSONProtocol when serialized yields TField ids rather than names + * [THRIFT-2220] - Add a new struct structv? + * [THRIFT-1352] - Thrift server + * [THRIFT-989] - Push boost m4 macros upstream + * [THRIFT-1349] - Remove unnecessary print outs + * [THRIFT-2496] - server and client for test/go, also several fixes and improvements + * [THRIFT-1114] - Maven publish shouldn't require passwords hardcoded in settings.xml + * [THRIFT-2043] - visual 2010 warnings - unreachable code + * [THRIFT-1683] - Implement alternatives to Javascript Client side Transport protocol, such as NPAPI and WebSocket. + * [THRIFT-1746] - provide a SPDX file + * [THRIFT-1772] - Serialization does not check types of embedded structures.
+ * [THRIFT-2387] - nodejs: external imports should be centralized in index.js + * [THRIFT-2037] - More general macro THRIFT_UNUSED_VARIABLE + +## New Feature + * [THRIFT-1012] - Transport for DataInput DataOutput interface + * [THRIFT-2256] - Use c++11/c++0x std library to replace boost library + * [THRIFT-2250] - JSON and MemoryBuffer for JavaME + * [THRIFT-2114] - Python Service Remote SSL Option + * [THRIFT-1719] - SASL client support for Python + * [THRIFT-1894] - Thrift multi-threaded async Java Server using Java 7 AsynchronousChannelGroup + * [THRIFT-1893] - HTTP/JSON server/client for node js + * [THRIFT-2347] - C# TLS Transport based on THRIFT-181 + * [THRIFT-2377] - Allow addition of custom HTTP Headers to an HTTP Transport + * [THRIFT-2408] - Named Pipe Transport Option for C# + * [THRIFT-2572] - Add string/collection length limit checks (from C++) to java protocol readers + * [THRIFT-2469] - "java:fullcamel" option to automatically camel-case underscored attribute names + * [THRIFT-795] - Importing service functions (simulating multiple inheritance) + * [THRIFT-2164] - Add a Get/Post Http Server to Node along with examples + * [THRIFT-2255] - add Parent Class for generated Struct class + +## Question + * [THRIFT-2539] - Tsocket.cpp addrinfo ai_flags = AI_ADDRCONFIG + * [THRIFT-2440] - how to connect as3 to java by thrift + * [THRIFT-2379] - Memory leaking while using multithreading in C++ server. + * [THRIFT-2277] - Thrift: installing fb303 error + * [THRIFT-2567] - Csharp slow? + * [THRIFT-2573] - thrift 0.9.2 release + +## Sub-task + * [THRIFT-981] - cocoa: add version Info to the library + * [THRIFT-2132] - Go: Support for Multiplexing Services on any Transport, Protocol and Server + * [THRIFT-2299] - TJsonProtocol implementation for Ruby does not allow for both possible slash (solidus) encodings + * [THRIFT-2298] - TJsonProtocol implementation for C# does not allow for both possible slash (solidus) encodings + * [THRIFT-2297] - TJsonProtocol implementation for Delphi does not allow for both possible slash (solidus) encodings + * [THRIFT-2271] - JavaScript: Support for Multiplexing Services + * [THRIFT-2251] - go test for compact protocol is not running + * [THRIFT-2195] - Delphi: Add event handlers for server and processing events + * [THRIFT-2176] - TSimpleJSONProtocol.ReadFieldBegin() does not return field type and ID + * [THRIFT-2175] - Wrong field type set for binary + * [THRIFT-2174] - Deserializing JSON fails in specific cases + * [THRIFT-2053] - NodeJS: Support for Multiplexing Services + * [THRIFT-1914] - Python: Support for Multiplexing Services on any Transport, Protocol and Server + * [THRIFT-1810] - add ruby to test/test.sh + * [THRIFT-2310] - PHP: Client-side support for Multiplexing Services + * [THRIFT-2346] - C#: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2345] - Delphi: UTF-8 sent by PHP as JSON is not understood by TJsonProtocol + * [THRIFT-2338] - First doctext wrongly interpreted as program doctext in some cases + * [THRIFT-2325] - SSL test certificates + * [THRIFT-2358] - C++: add compact protocol to cross language test suite + * [THRIFT-2425] - PHP: Server-side support for Multiplexing Services + * [THRIFT-2421] - Tree/Recursive struct support in thrift + * [THRIFT-2290] - Update Go tutorial to align with THRIFT-2232 + * [THRIFT-2558] - CSharp compiler generator tries to concat ints with strings using + + * [THRIFT-2507] - Additional LUA TProtocolException error code needed?
+ * [THRIFT-2499] - Compiler: allow annotations without "= value"
+ * [THRIFT-2534] - Cross language test results should be recorded to a status.md or status.html file automatically
+ * [THRIFT-66] - Java: Allow multiplexing multiple services over a single TCP connection
+ * [THRIFT-1681] - Add Lua Support
+ * [THRIFT-1727] - Ruby-1.9: data loss: "binary" fields are re-encoded
+ * [THRIFT-1726] - Ruby-1.9: "binary" fields are represented by string whose encoding is "UTF-8"
+ * [THRIFT-988] - perl: add version Info to the library via configure
+ * [THRIFT-334] - Compact Protocol for PHP
+ * [THRIFT-2444] - pull request 88: thrift: clean up enum value assignment
+
+## Task
+ * [THRIFT-2223] - Spam links on wiki
+ * [THRIFT-2566] - Please create a DOAP file for your TLP
+ * [THRIFT-2237] - Update archive to contain all versions
+ * [THRIFT-962] - Tutorial page on our website is really unhelpful
+
+## Test
+ * [THRIFT-2327] - nodejs: nodejs test suite should be bundled with the library
+ * [THRIFT-2445] - THRIFT-2384 (code generation for go maps with binary keys) should be tested
+ * [THRIFT-2501] - C# The test parameters from the TestServer and TestClient are different from the http://thrift.apache.org/test/
+
+## Wish
+ * [THRIFT-2190] - Add the JavaScript thrift.js lib to the Bower registry
+ * [THRIFT-2076] - boost::optional instead of __isset
+
+
+
+Thrift 0.9.1
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1440] - debian packaging: minor-ish policy problems
+ * [THRIFT-1402] - Generated Y_types.js does not require() X_types.js when an include in the IDL file was used
+ * [THRIFT-1551] - 2 thrift files define only structs (no service), one includes another; the generated nodejs file didn't have "requires" at the top
+ * [THRIFT-1264] - TSocketClient is queried by run loop after deallocation in Cocoa
+ * [THRIFT-1600] - Thrift Go Compiler and Library out of date with Go 1 Release.
+ * [THRIFT-1603] - Thrift IDL allows for multiple exceptions, args or struct member names to be the same
+ * [THRIFT-1062] - Problems with python tutorials
+ * [THRIFT-864] - default value fails if identifier is a struct
+ * [THRIFT-930] - Ruby and Haskell bindings don't properly support DESTDIR (makes packaging painful)
+ * [THRIFT-820] - The readLength attribute of TBinaryProtocol is used as an instance variable and is decremented on each call of checkReadLength
+ * [THRIFT-1640] - None of the tutorials linked on the website contain content
+ * [THRIFT-1637] - NPM registry does not include version 0.8
+ * [THRIFT-1648] - NodeJS clients always receive 0 for 'double' values.
+ * [THRIFT-1660] - Python Thrift library can be installed with pip but not easy_install
+ * [THRIFT-1657] - Chrome browser sending OPTIONS method before POST in xmlHttpRequest
+ * [THRIFT-2118] - Certificate error handling still incorrect
+ * [THRIFT-2137] - Ruby test lib fails jenkins build #864
+ * [THRIFT-2136] - Vagrant build not compiling java, ruby, php, go libs due to missing dependencies
+ * [THRIFT-2135] - GO lib leaves behind test files that are auto generated
+ * [THRIFT-2134] - mingw-cross-compile script failing with strip errors
+ * [THRIFT-2133] - java TestTBinaryProtocol.java test failing
+ * [THRIFT-2126] - lib/cpp/src/thrift/concurrency/STD* files missing from DIST
+ * [THRIFT-2125] - debian missing from DIST
+ * [THRIFT-2124] - .o, .so, .la, .deps, .libs, gen-* files left tutorials, test and lib/cpp when making DIST
+ * [THRIFT-2123] - GO lib missing files in DIST build
+ * [THRIFT-2121] - Compilation bug for Node.js
+ * [THRIFT-2129] - php ext missing from dist
+ * [THRIFT-2128] - lib GO tests fail with "function ends without a return statement"
+ * [THRIFT-2286] - Failed to compile Thrift 0.9.1 with boost 1.55 using VS2010 when selecting Debug-mt & x64 mode.
+ * [THRIFT-1973] - TCompactProtocol in C# lib does not serialize and deserialize negative int32 and int64 number correctly
+ * [THRIFT-1992] - casts in TCompactProtocol.tcc causing "dereferencing type-punned pointer will break strict-aliasing rules" warnings from gcc
+ * [THRIFT-1930] - C# generates unsigned byte for Thrift "byte" type
+ * [THRIFT-1929] - Update website to use Mirrors for downloads
+ * [THRIFT-1928] - Race may still exist in TFileTransport::flush()
+ * [THRIFT-1934] - Tabs in Example section on main page are not working
+ * [THRIFT-1933] - Delphi generator crashes when a typedef references another typedef from an included file
+ * [THRIFT-1942] - Binary accelerated cpp extension does not use Thrift namespaces for Exceptions
+ * [THRIFT-1959] - C#: Add Union TMemoryBuffer support
+ * [THRIFT-1958] - C#: Use static Object.Equals instead of .Equals() calls in equals
+ * [THRIFT-1957] - NodeJS TFramedTransport and TBufferedTransport read bytes as unsigned
+ * [THRIFT-1955] - Union Type writer generated in C# does not WriteStructBegin
+ * [THRIFT-1952] - Travis CI
+ * [THRIFT-1949] - WP7 build broken
+ * [THRIFT-1943] - docstrings for enum values are ignored
+ * [THRIFT-2070] - Improper 'HexChar' and 'HexVal' implementation in TJSONProtocol.cs
+ * [THRIFT-2017] - Resource Leak in thrift struct under compiler/cpp/src/parse/t_program.h
+ * [THRIFT-2032] - C# client leaks sockets/handles
+ * [THRIFT-1996] - JavaME Constants generation is broken / inconsistent with regular Java generation
+ * [THRIFT-2002] - Haskell: Test use Data.Maybe instead of Maybe
+ * [THRIFT-2051] - Vagrant fails to build erlang
+ * [THRIFT-2050] - Vagrant C# lib compile fails with TException missing
+ * [THRIFT-1978] - Ruby: Thrift should allow for the SSL verify mode to be set
+ * [THRIFT-1984] - namespace collision in python bindings
+ * [THRIFT-1988] - When trying to build a debian package it fails as the file NEWS doesn't exist
+ * [THRIFT-1975] - TBinaryProtocol CheckLength can't be used for a client
+ * [THRIFT-1995] - '.' allowed at end of identifier generates non-compilable code
+ * [THRIFT-2112] - Error in Go generator when using typedefs in map keys
+ * [THRIFT-2088] - Typos in Thrift compiler help text
+ * [THRIFT-2080] - C# multiplex processor does not catch IOException
+ * [THRIFT-2082] - Executing "gmake clean" is broken
+ * [THRIFT-2102] - constants are not referencing the correct type when included from another thrift file
+ * [THRIFT-2100] - typedefs are not correctly referenced when including from other thrift files
+ * [THRIFT-2066] - 'make install' does not install two headers required for C++ bindings
+ * [THRIFT-2065] - Not valid constants filename in Java
+ * [THRIFT-2047] - Thrift.Protocol.TCompactProtocol, intToZigZag data lost (TCompactProtocol.cs)
+ * [THRIFT-2036] - Thrift gem warns about class variable access from top level
+ * [THRIFT-2057] - Vagrant fails on php tests
+ * [THRIFT-2105] - Generated code for default values of collections ignores t_field::T_REQUIRED
+ * [THRIFT-2091] - Unnecessary 'friend' declaration causes warning in TWinsockSingleton
+ * [THRIFT-2090] - Go generator, fix including of other thrift files
+ * [THRIFT-2106] - Fix support for namespaces in GO generator
+ * [THRIFT-1783] - C# doesn't handle required fields correctly
+ * [THRIFT-1782] - async only defined in silverlight
+ * [THRIFT-1779] - Missing process_XXXX method in generated TProcessor implementation for all 'oneway' service functions
+ * [THRIFT-1692] - SO_REUSEADDR allows for socket hijacking on Windows
+ * [THRIFT-1720] - JRuby times out on successful connection
+ * [THRIFT-1713] - Named and Anonymous Pipe transport (Delphi)
+ * [THRIFT-1699] - Native Union#read has extra read_field_end call
+ * [THRIFT-1749] - Python TSSLSocket error handling obscures actual error
+ * [THRIFT-1748] - Guard and RWGuard macros defined in global namespace
+ * [THRIFT-1734] - Front webpage is still advertising v0.8 as current release
+ * [THRIFT-1729] - C glib refactor left empty folders in svn
+ * [THRIFT-1767] - unions can't have required fields (Delphi)
+ * [THRIFT-1765] - Incorrect error message printed for null or negative keys
+ * [THRIFT-1778] - Configure requires manual intervention due to tar failure
+ * [THRIFT-1777] - TPipeServer is UNSTOPPABLE
+ * [THRIFT-1753] - Multiple C++ Windows, OSX, and iOS portability issues
+ * [THRIFT-1756] - 'make -j 8' fails with "unterminated #ifdef" error
+ * [THRIFT-1773] - Python library should run on python 2.4
+ * [THRIFT-1769] - unions can't have required fields (C++)
+ * [THRIFT-1768] - unions can't have required fields (Compiler)
+ * [THRIFT-1666] - htonll usage in TBinaryProtocol.tcc generates warning with MSVC2010
+ * [THRIFT-1919] - libthrift depends on httpcore-4.1.3 (directly) and httpcore-4.1.4 (transitively)
+ * [THRIFT-1864] - implement event handler for non-blocking server
+ * [THRIFT-1859] - Generated error c++ code with -out and include_prefix param
+ * [THRIFT-1869] - TThreadPoolServer (java) dies when threadpool is consumed
+ * [THRIFT-1842] - Memory leak with Pipes
+ * [THRIFT-1838] - Can't build compiler on OS X because of missing thrifty.h
+ * [THRIFT-1846] - Restore socket.h header to support builds with Android NDK
+ * [THRIFT-1850] - make check hangs on TSocket tests in TransportTest.cpp
+ * [THRIFT-1873] - Binary protocol factory ignores struct read/write flags
+ * [THRIFT-1872] - issues with TBufferedTransport buffer
+ * [THRIFT-1904] - Incorrect code is generated for typedefs which use included types
+ * [THRIFT-1903] - PHP namespaces cause binary protocols to not be used
+ * [THRIFT-1895] - Delphi: reserved variable name "result" not detected properly
+ * [THRIFT-1881] - TNonblockingServer does not release open connections or threads on shutdown
+ * [THRIFT-1888] - Java Thrift client can't connect to Python Thrift server on same host
+ * [THRIFT-1831] - Bug in list deserializer
+ * [THRIFT-1824] - many compile warnings, because Thread.h includes config.h
+ * [THRIFT-1823] - Missing parenthesis breaks "IS_..." macro in generated code
+ * [THRIFT-1806] - Python generation always truncates __init__.py files
+ * [THRIFT-1795] - Race condition in TThreadedServerPool java implementation
+ * [THRIFT-1794] - C# asyncctp broken
+ * [THRIFT-1804] - Binary+compact protocol single byte error in Ruby library (ARM architecture): caused by different char signedness
+ * [THRIFT-1800] - Documentation text not always escaped correctly when rendered to HTML
+ * [THRIFT-1788] - C#: Constants static constructor does not compile
+ * [THRIFT-1816] - Need "require" included thrift files in "xxx_types.js"
+ * [THRIFT-1907] - Compiling namespace and sub-namespace directives for unrecognized generators should only be a warning
+ * [THRIFT-1913] - skipping unknown fields in java unions
+ * [THRIFT-2553] - C++ linker error - transport/TSocket
+ * [THRIFT-274] - Towards a working release/versioning process
+
+## Documentation
+ * [THRIFT-1971] - [Graphviz] Adds tutorial/general description documentation
+ * [THRIFT-2001] - http://thrift.apache.org/ Example "C++ Server" tab is broken
+
+## Improvement
+ * [THRIFT-1574] - Apache project branding requirements: DOAP file [PATCH]
+ * [THRIFT-1347] - Unify the exceptions returned in generated Go code
+ * [THRIFT-1353] - Switch to performance branch, get rid of BinaryParser
+ * [THRIFT-1629] - Ruby 1.9 Compatibility during Thrift configure, make, install
+ * [THRIFT-991] - Refactor Haskell code and generator
+ * [THRIFT-990] - Sanify gettimeofday usage codebase-wide
+ * [THRIFT-791] - Let C++ TSimpleServer be driven by an external main loop
+ * [THRIFT-2117] - Cocoa TBinaryProtocol strictWrite should be set to true by default
+ * [THRIFT-2014] - Change C++ lib includes to use <> style throughout
+ * [THRIFT-1972] - Add support for async processors
+ * [THRIFT-1970] - [Graphviz] Adds option to render exceptions relationships
+ * [THRIFT-1966] - Support different files for SSL certificates and keys
+ * [THRIFT-1965] - Adds Graphviz (graph description language) generator
+ * [THRIFT-1956] - Switch to Apache Commons Lang 3
+ * [THRIFT-1962] - Multiplex processor should send any TApplicationException back to client
+ * [THRIFT-1960] - main() declares 22 unused gen bools
+ * [THRIFT-1951] - libthrift.jar has source files in it
+ * [THRIFT-1997] - Add accept backlog configuration method to TServerSocket
+ * [THRIFT-2003] - Deprecate senum
+ * [THRIFT-2052] - Vagrant machine image defaults to only 384MB of RAM
+ * [THRIFT-1980] - Modernize Go tooling, fix go client library.
+ * [THRIFT-1977] - C# compiler should generate constant files prefixed with thrift file name
+ * [THRIFT-1985] - add a Vagrantfile to build and test Apache Thrift fully reproducibly
+ * [THRIFT-1994] - Deprecate slist
+ * [THRIFT-1993] - Factory to create instances from known (generated) interface types with Delphi
+ * [THRIFT-2081] - Specified timeout should be used in TSocket.Open()
+ * [THRIFT-2084] - Delphi: Ability to create entity Thrift-generated instances based on TypeInfo
+ * [THRIFT-2083] - Improve the go lib: buffered Transport, save memory allocation, handle concurrent requests
+ * [THRIFT-2109] - Secure connections should be supported in Go
+ * [THRIFT-2107] - minor Go generator fixes
+ * [THRIFT-1695] - allow warning-free compilation in VS 2012 and GNU 4.6
+ * [THRIFT-1735] - integrate tutorial into regular build
+ * [THRIFT-1716] - max allowed connections should be PIPE_UNLIMITED_INSTANCES
+ * [THRIFT-1715] - Allow excluding python parts when building contrib/fb303
+ * [THRIFT-1733] - Fix RPM build issues on RHEL6/OL6 systems
+ * [THRIFT-1728] - Upgrade of httpcomponents
+ * [THRIFT-1876] - Use enum names instead of casted integers in assignments
+ * [THRIFT-1874] - timeout for the server-side end of a named pipe
+ * [THRIFT-1897] - Support validation of required fields
+ * [THRIFT-1896] - Add TBase protocol for Cocoa
+ * [THRIFT-1880] - Make named pipes server work asynchronously (overlapped) to allow for clean server stops
+ * [THRIFT-1878] - Add the possibility to send custom headers
+ * [THRIFT-1882] - Use single include
+ * [THRIFT-1793] - C#: Use static read instead of instance read
+ * [THRIFT-1799] - Option to generate HTML in "standalone mode"
+ * [THRIFT-1815] - Code generators line buffer output
+ * [THRIFT-1890] - C++: Make named pipes server work asynchronously
+ * [THRIFT-474] - Generating Ruby on Rails friendly code
+
+## New Feature
+ * [THRIFT-801] - Provide an interactive shell (irb) when generating ruby bindings
+ * [THRIFT-2292] - Android Library Project
+ * [THRIFT-2012] - Modernizing Go
+ * [THRIFT-1969] - C#: Tests not properly linked from the solution
+ * [THRIFT-1785] - C#: Add TMemoryBuffer serializer/deserializer
+ * [THRIFT-1780] - Add option to generate nullable values
+ * [THRIFT-1786] - C# Union Typing
+ * [THRIFT-591] - Make the C++ runtime library be compatible with Windows and Visual Studio
+ * [THRIFT-514] - Add option to configure compiler output directory
+
+## Question
+ * [THRIFT-1764] - how to get the context of the client when on an RPC call on the server side?
+ * [THRIFT-1791] - thrift's namespace directive when generating haskell code
+
+## Sub-task
+ * [THRIFT-1594] - Java test clients should have return codes that reflect whether they succeed or not.
+ * [THRIFT-1595] - Java test server should follow the documented behavior as of THRIFT-1590
+ * [THRIFT-986] - st: add version Info to the library
+ * [THRIFT-985] - php: add version Info to the library
+ * [THRIFT-984] - ocaml: add version Info to the library
+ * [THRIFT-1924] - Delphi: Inconsistency in serialization of optional fields
+ * [THRIFT-1922] - C#: Inconsistency in serialization of optional fields
+ * [THRIFT-1961] - C# tests should be in lib/csharp/test/...
+ * [THRIFT-1822] - PHP unit test does not work
+ * [THRIFT-1902] - C++: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-1901] - C#: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-1899] - Delphi: Support for Multiplexing Services on any Transport, Protocol and Server
+ * [THRIFT-563] - Support for Multiplexing Services on any Transport, Protocol and Server
+
+
+
+Thrift 0.9
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1438] - lib/cpp/src/windows/config.h should read version from configure.ac rather than a #define
+ * [THRIFT-1446] - Compile error with Delphi 2009 in constant initializer
+ * [THRIFT-1450] - Problems building thrift 0.8.0 for Python and Ruby
+ * [THRIFT-1449] - Ruby client does not work on solaris (?)
+ * [THRIFT-1447] - NullPointerException in ProcessFunction.class in "oneway" method
+ * [THRIFT-1433] - TServerSocket fix for MSVC
+ * [THRIFT-1429] - The nonblocking server is supposed to use TransportFactory to read the data
+ * [THRIFT-1427] - PHP library uses non-multibyte safe functions with mbstring function overloading
+ * [THRIFT-1421] - Debian Packages can not be built
+ * [THRIFT-1394] - Treatment of optional fields is not consistent between C++ and Java
+ * [THRIFT-1511] - Server with oneway support (JAVA)
+ * [THRIFT-1496] - PHP compiler not namespacing enums
+ * [THRIFT-1495] - PHP TestClient fatals on missing class
+ * [THRIFT-1508] - TServerSocket does not allow for the user to specify the IP address to bind to
+ * [THRIFT-1504] - Cocoa Generator should use local file imports for base Thrift headers
+ * [THRIFT-1512] - Thrift socket support for Windows XP
+ * [THRIFT-1502] - TSimpleServer::serve(): Do not print out error message if server was stopped.
+ * [THRIFT-1501] - PHP old namespaces not generated for enums
+ * [THRIFT-1483] - java compiler does not generate type parameters for services in extended clauses
+ * [THRIFT-1479] - Compiled PHP process functions missing writeMessageEnd()
+ * [THRIFT-1492] - enabling c_glib renders thrift unusable (even for C++ code)
+ * [THRIFT-1491] - Uninitialized processorFactory_ member in TServer.h
+ * [THRIFT-1475] - Incomplete records generation for Erlang
+ * [THRIFT-1486] - Javascript manual testserver not returning content types
+ * [THRIFT-1488] - src/concurrency/Thread.h:91:58: error: invalid conversion from 'pthread_t {aka _opaque_pthread_t*}' to 'apache::thrift::concurrency::Thread::id_t {aka long long unsigned int}' [-fpermissive]
+ * [THRIFT-1490] - Windows-specific header files - fixes & tweaks
+ * [THRIFT-1526] - Union TupleSchemeFactory returns StandardSchemes
+ * [THRIFT-1527] - Generated implementation of tupleReadStruct in unions returns null when the setfield is unrecognized
+ * [THRIFT-1524] - TNonBlockingServer does not compile in Visual Studio 2010
+ * [THRIFT-1529] - TupleProtocol can unintentionally include an extra byte in bit vectors when number of optional fields is a multiple of 8
+ * [THRIFT-1473] - JSON context stack may be left in an incorrect state when an exception is thrown during read or write operations
+ * [THRIFT-1456] - 'System.Net.HttpWebRequest' does not contain a definition for 'Proxy'
+ * [THRIFT-1468] - Memory leak in TSaslServerTransport
+ * [THRIFT-1461] - Recent TNonblockingServer changes broke --enable-boostthreads=yes, Windows
+ * [THRIFT-1460] - why not add unicode strings support to python directly?
+ * [THRIFT-1464] - AbstractNonblockingServer.FrameBuffer TNonblockingTransport accessor changed from public to private
+ * [THRIFT-1467] - Possible AV with empty strings when using JSON protocol
+ * [THRIFT-1523] - clientTimeout not working as expected in TServerSocket created by TSSLTransportFactory
+ * [THRIFT-1537] - TFramedTransport issues
+ * [THRIFT-1519] - Thrift Build Failure referencing rb_intern2 symbol
+ * [THRIFT-1518] - Generated C++ code only sends the first optional field in the write() function for a struct.
+ * [THRIFT-1515] - NameError: global name 'TApplicationException' is not defined
+ * [THRIFT-1554] - Inherited service methods are not resolved in derived service implementations
+ * [THRIFT-1553] - thrift nodejs service side can't read map structure, key as enum, value as Object
+ * [THRIFT-1575] - Typo in server/TThreadPoolServer.h
+ * [THRIFT-1327] - Fix Spec Suite under Ruby-1.8.7 (works for MRI Ruby-1.9.2)
+ * [THRIFT-1326] - on some platforms, #include is necessary to be included in Thrift.h
+ * [THRIFT-1159] - THttpClient->Flush() issue (connection thru proxy)
+ * [THRIFT-1277] - Node.js serializes false booleans as null
+ * [THRIFT-1224] - Cannot insert UTF-8 text
+ * [THRIFT-1267] - Node.js can't throw exceptions.
+ * [THRIFT-1338] - Do not use an unpatched autoconf 2.65 to generate release tarball
+ * [THRIFT-1128] - MAC OS X: thrift.h incompatibility with Thrift.h
+ * [THRIFT-1631] - Fix C++ server constructor typos
+ * [THRIFT-1602] - PHP C Extension is not Compatible with PHP 5.4
+ * [THRIFT-1610] - IWebProxy not available on WP7 platform
+ * [THRIFT-1606] - Race condition in BoostThreadFactory.cpp
+ * [THRIFT-1604] - Python exception handling for changes from PEP 3110
+ * [THRIFT-1607] - Incorrect file modes for several source files
+ * [THRIFT-1583] - c_glib leaks memory
+ * [THRIFT-1582] - Bad includes of nested thrift files in c_glib
+ * [THRIFT-1578] - C_GLib generated code does not compile
+ * [THRIFT-1597] - TJSONProtocol.php is missing from Makefile.am
+ * [THRIFT-1591] - Enable TCP_NODELAY for ruby gem
+ * [THRIFT-1624] - Isset Generated differently on different platforms
+ * [THRIFT-1622] - Incorrect size returned on read
+ * [THRIFT-1621] - Memory leaks
+ * [THRIFT-1612] - Base64 encoding is broken
+ * [THRIFT-1627] - compiler built using compilers.vcxproj cannot be used to build some test .thrift files
+ * [THRIFT-1571] - Update Ruby HTTP transport for recent Ruby versions
+ * [THRIFT-1023] - Thrift encoding (UTF-8) issue with Ruby 1.9.2
+ * [THRIFT-1090] - Document the generation of a file called "Constants.java"
+ * [THRIFT-1082] - Thrift::FramedTransport sometimes calls close() on an undefined value
+ * [THRIFT-956] - Python module's version meta-data should be updated
+ * [THRIFT-973] - Cocoa library won't compile using clang
+ * [THRIFT-1632] - ruby: data corruption in thrift_native implementation of MemoryBufferTransport
+ * [THRIFT-1665] - TBinaryProtocol: exceeded message length raises generic TException
+ * [THRIFT-1664] - Reference to non-existing variable in build script
+ * [THRIFT-1663] - Java Thrift server is not throwing exceptions
+ * [THRIFT-1662] - "removeObject:" should be "removeObserver:" in [-TSocketServer dealloc]?
+ * [THRIFT-1643] - Denial of Service attack in TBinaryProtocol.readString
+ * [THRIFT-1674] - Update Thrift D library to be compatible with 2.060
+ * [THRIFT-1673] - Ruby compile flags for extension for multi arch builds (os x)
+ * [THRIFT-1655] - Configure still trying to use thrift_generators in output
+ * [THRIFT-1654] - c_glib thrift_socket_read() returns corrupted data
+ * [THRIFT-1653] - TThreadedSelectorServer leaks CLOSE_WAIT sockets
+ * [THRIFT-1658] - Java thrift server is not throwing TApplicationException
+ * [THRIFT-1656] - Setting proper headers in THttpServer.cpp so that "Cross-Origin Resource Sharing" on js client can work.
+ * [THRIFT-1652] - TSaslTransport does not log the error when kerberos auth fails
+ * [THRIFT-2272] - CLONE - Denial of Service attack in TBinaryProtocol.readString
+ * [THRIFT-2086] - Invalid generated code for Node.JS when using namespaces
+ * [THRIFT-1686] - t_php_generator.cc uses "and" instead of "&&", and causes compiler errors with Visual Studio
+ * [THRIFT-1693] - libthrift has dependency on two different versions of httpcore
+ * [THRIFT-1689] - don't exit(-1) in TNonblockingServer
+ * [THRIFT-1679] - NodeJS: protocol readString() should treat string as utf8, not binary
+ * [THRIFT-1721] - Dist broken due to 0.8.0 to 0.9.0 changes
+ * [THRIFT-1710] - Minor issues in test case code
+ * [THRIFT-1709] - Warning "Bitwise-or operator used on a sign-extended operand; consider casting to a smaller unsigned type first" in TBinaryProtocol.cs at ReadInt64()
+ * [THRIFT-1707] - [ruby] Adjust server_spec.rb for RSpec 2.11.x and Ruby 1.9.3
+ * [THRIFT-1671] - Cocoa code generator does not put keywords into generated method calls
+ * [THRIFT-1670] - Incompatibilities between different versions of a Thrift interface
+ * [THRIFT-1669] - NameError: global name 'TApplicationException' is not defined
+ * [THRIFT-1668] - Compile error in contrib/fb303, thrift/TDispatchProcessor.h: No such file or directory
+ * [THRIFT-1845] - Fix compiler warning caused by implicit string conversion with Xcode 4.6
+ * [THRIFT-304] - Building the Python library requires development headers
+ * [THRIFT-369] - sets and maps break equality
+ * [THRIFT-556] - Ruby compiler does not correctly refer to top-level modules when a submodule masks the top-level name
+ * [THRIFT-481] - indentation of ruby classes is off by a few
+
+## Improvement
+ * [THRIFT-1498] - Allow TThreadedPoolServer.Args to pass an ExecutorService
+ * [THRIFT-1444] - FunctionRunner - add syntactic sugar to create shared_ptrs
+ * [THRIFT-1443] - define a TProcessor helper class to implement process()
+ * [THRIFT-1441] - Generate constructor with parameters for exception class to let it update message property automatically.
+ * [THRIFT-1520] - Embed version number in erlang .app file
+ * [THRIFT-1480] - python: remove tabs, adjust whitespace and address PEP8 warnings
+ * [THRIFT-1485] - Performance: pass large and/or refcounted arguments as "const"
+ * [THRIFT-1484] - Introduce phpunit test suite
+ * [THRIFT-1532] - The type specifications in the generated Erlang code should include "undefined" where it's used as a default value
+ * [THRIFT-1534] - Required fields in the Delphi code generator.
+ * [THRIFT-1469] - Java isset space optimization
+ * [THRIFT-1465] - Visibility of methods in generated java code
+ * [THRIFT-1453] - Don't change types of arguments when serializing with thrift php extension
+ * [THRIFT-1452] - generate a swap() method for all generated structs
+ * [THRIFT-1451] - FramedTransport: Prevent infinite loop when writing
+ * [THRIFT-1521] - Two patches for more Performance
+ * [THRIFT-1555] - Delphi version of the tutorial code
+ * [THRIFT-1535] - Why doesn't thrift use a wrapped class for optional fields?
+ * [THRIFT-1204] - Ruby autogenerated files should require 'thrift' gem
+ * [THRIFT-1344] - Using the httpc module directly rather than the deprecated http layer
+ * [THRIFT-1343] - no_auto_import min/2 to avoid compile warning
+ * [THRIFT-1340] - Add support of ARC to Objective-C
+ * [THRIFT-1611] - Improved code generation for typedefs
+ * [THRIFT-1593] - Pass on errors like "connection closed" to the handler module
+ * [THRIFT-1615] - PHP Namespace
+ * [THRIFT-1567] - Thrift/cpp: Allow alternate classes to be used for
+ * [THRIFT-1072] - Missing - (id) initWithSharedProcessor in TSharedProcessorFactory.h
+ * [THRIFT-1650] - [ruby] Update clean items and svn:ignore entries for OS X artifacts
+ * [THRIFT-1661] - [PATCH] Add --with-qt4 configure option
+ * [THRIFT-1675] - Do we have any plan to support scala?
+ * [THRIFT-1645] - Replace Object#tee with more conventional Object#tap in specs
+ * [THRIFT-1644] - Upgrade RSpec to 2.10.x and refactor specs as needed
+ * [THRIFT-1672] - MonoTouch (and Mono for Android) compatibility
+ * [THRIFT-1702] - a thrift manual
+ * [THRIFT-1694] - Re-Enable serialization for WP7 Silverlight
+ * [THRIFT-1691] - Serializer/deserializer support for Delphi
+ * [THRIFT-1688] - Update IDL page markup
+ * [THRIFT-1725] - Tutorial web pages for Delphi and C#
+ * [THRIFT-1714] - [ruby] Explicitly add CWD to Ruby test_suites.rb
+ * [THRIFT-317] - Issues with Java struct validation
+ * [THRIFT-164] - Build web tutorial on Incubator web site
+ * [THRIFT-541] - Cocoa code generator doesn't put keywords before all arguments.
+ * [THRIFT-681] - The HTML generator does not handle JavaDoc style comments very well
+
+## New Feature
+ * [THRIFT-1500] - D programming language support
+ * [THRIFT-1510] - There should be an implementation of the JsonProtocol for ruby
+ * [THRIFT-1115] - python TBase class for dynamic (de)serialization, and __slots__ option for memory savings
+ * [THRIFT-1953] - support for asp.net mvc 3
+
+## Question
+ * [THRIFT-1235] - How could I use THttpServerTransportFactory with TNonBlockingServer
+ * [THRIFT-1368] - TNonblockingServer usage
+ * [THRIFT-1061] - Read an invalid frame size of 0. Are you using TFramedTransport on the client side?
+ * [THRIFT-491] - Ripping raw pthreads out of TFileTransport and associated test issues
+
+## Sub-task
+ * [THRIFT-1596] - Delphi: Test clients should have return codes that reflect whether they succeeded or not
+ * [THRIFT-982] - javame: add version Info to the library
+ * [THRIFT-1722] - C# WP7 Assembly addition breaks mono build
+ * [THRIFT-336] - Compact Protocol in C#
+
+## Test
+ * [THRIFT-1613] - Add code back into empty source file ToStringTest.java
+ * [THRIFT-1718] - Incorrect check in TFileTransportTest
+
+## Wish
+ * [THRIFT-1463] - Decouple Thrift IDL from generators
+ * [THRIFT-1466] - Proper Documentation for Thrift C Glib
+ * [THRIFT-1539] - Build and distribute the fb303 python libraries along with thrift
+ * [THRIFT-1685] - Please add "aereo.com" to "Powered by Apache Thrift" list in about page
+ * [THRIFT-330] - TProcessor - additional method to be called when connection is broken
+
+
+
+Thrift 0.8
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1436] - pip install thrift fails on Windows with "Unable to find vcvarsall.bat"
+ * [THRIFT-1432] - Javascript struct constants declared in the same file as their struct definition will cause an error
+ * [THRIFT-1428] - shared.thrift does not include namespace for php, so thrift compiler generates incorrect name
+ * [THRIFT-1426] - Dist package missing files for release 0.8
+ * [THRIFT-1425] - The Node package is incompatible with latest node (0.6) & npm (1.0.27)
+ * [THRIFT-1416] - Python Unit test is broken on ci
+ * [THRIFT-1419] - AbstractNonBlockingServer does not catch errors when invoking the processor
+ * [THRIFT-1424] - Ruby specs fail when run with rake
+ * [THRIFT-1420] - Nonblocking and HsHa server should make sure to close all their socket connections when the selector exits
+ * [THRIFT-1413] - Generated code does not read MapEnd / ListEnd / SetEnd
+ * [THRIFT-1409] - Name conflict check does not work properly for exception object (Delphi).
+ * [THRIFT-1408] - Delphi Test Server: Exception test case fails due to naming conflict with e.message
+ * [THRIFT-1407] - Typo in Python socket server causes Thrift to fail when we enable a global socket timeout
+ * [THRIFT-1397] - CI server fails during build due to unused parameters in delphi generator
+ * [THRIFT-1404] - Delphi compiler generates struct reader code with problem.
+ * [THRIFT-1400] - Ruby native extension aborts with __stack_chk_fail in OSX
+ * [THRIFT-1399] - One of the TServerImpl.Create CTORs lacks implementation
+ * [THRIFT-1390] - Debian packages build fix for Squeeze (build from the official 0.7.0 tarball)
+ * [THRIFT-1393] - TTransportException's thrown from THttpClient contain superfluous slashes in the Exception message
+ * [THRIFT-1392] - Enabling both namespaces and autoloading in generated PHP code won't work.
+ * [THRIFT-1406] - Build error after applying THRIFT-1395
+ * [THRIFT-1405] - Delphi compiler does not generate container serializer properly.
+ * [THRIFT-1411] - java generator does not provide type parameter for TBaseProcessor
+ * [THRIFT-1473] - JSON context stack may be left in an incorrect state when an exception is thrown during read or write operations
+ * [THRIFT-1331] - Ruby library deserializes an empty map to nil
+ * [THRIFT-1330] - PHP Namespaces no longer generated
+ * [THRIFT-1328] - TBaseHelper.toString(...) appends ByteBuffer data outside of valid buffer range
+ * [THRIFT-1322] - OCaml lib fails to compile: Thrift.ml line 305, int vs int32 mismatch
+ * [THRIFT-1143] - Build doesn't detect correct architecture type on 64bit osx
+ * [THRIFT-1205] - port server unduly fragile with arbitrary input
+ * [THRIFT-1279] - type set is handled incorrectly when writing object
+ * [THRIFT-1298] - Standard scheme doesn't read or write metadata along with field values
+ * [THRIFT-1265] - C++ container deserialize
+ * [THRIFT-1263] - publish ruby client to rubygems
+ * [THRIFT-1384] - Java help menu missing newline near javame flag
+ * [THRIFT-1382] - Bundle install does not work because thrift crashes
+ * [THRIFT-1381] - Thrift C++ libs have incorrectly versioned names
+ * [THRIFT-1350] - Go library code does not build as of r60 (most recent release)
+ * [THRIFT-1365] - TupleProtocol#writeBitSet unintentionally writes a variable length byte array
+ * [THRIFT-1359] - --gen-cob cpp:cob_style does not compile anymore
+ * [THRIFT-1319] - Mismatch between how a union reads and writes a container
+ * [THRIFT-1309] - libfb303-0.7.0.jar missing in maven repository
+ * [THRIFT-1238] - Thrift JS client cannot read map of structures
+ * [THRIFT-1254] - Code can't be compiled against a regular JRE: Object.clone() override has a different return type
+ * [THRIFT-1367] - Mac OSX build fails with "no such file to load -- spec/rake/spectask"
+ * [THRIFT-1355] - Running make in lib/rb doesn't build the native extensions
+ * [THRIFT-1370] - Debian packaging should Build-Depend on libglib2.0-dev
+ * [THRIFT-1342] - Compilation problem on Windows of fastbinary.c
+ * [THRIFT-1341] - TProtocol.h endian detection wrong with boost
+ * [THRIFT-1583] - c_glib leaks memory
+ * [THRIFT-1582] - Bad includes of nested thrift files in c_glib
+ * [THRIFT-1578] - C_GLib generated code does not compile
+ * [THRIFT-1027] - 'make -j 16' fails with "unterminated #ifdef" error
+ * [THRIFT-1121] - Java server performance regression in 0.6
+ * [THRIFT-857] - tests run by "make install" fail if generators are disabled
+ * [THRIFT-380] - Use setuptools for python build
+
+## Dependency upgrade
+ * [THRIFT-1257] - thrift's dependency scope on javax.servlet:servlet-api should be 'provided'
+
+## Improvement
+ * [THRIFT-1445] - minor C++ generator variable cleanup
+ * [THRIFT-1435] - make TException.Message property conformant to the usual expectations
+ * [THRIFT-1431] - Rename 'sys' module to 'util'
+ * [THRIFT-1396] - Delphi generator has dependency on boost 1.42 or later.
+ * [THRIFT-1395] - Patch to prevent warnings for integer types in some cases
+ * [THRIFT-1275] - thrift: always prefix namespaces with " ::"
+ * [THRIFT-1274] - thrift: fail compilation if an unexpected token is
+ * [THRIFT-1271] - thrift: fix missing namespace in generated local
+ * [THRIFT-1270] - thrift: add --allow-neg-keys argument to allow
+ * [THRIFT-1345] - Allow building without tests
+ * [THRIFT-1286] - Modernize the Thrift Ruby Library Dev Environment
+ * [THRIFT-1284] - thrift: fix processor inheritance
+ * [THRIFT-1283] - thrift: wrap t_cpp_generator::generate_process_function() to 80
+ * [THRIFT-1282] - Upgrade httpclient to 4.1.2 (from 4.0.1)
+ * [THRIFT-1281] - add @generated to the docblock
+ * [THRIFT-1280] - Thrift: Improve Monitor exception-free interfaces
+ * [THRIFT-1278] - javadoc warnings - compilation
+ * [THRIFT-1227] - Erlang implementation of thrift JSON protocol
+ * [THRIFT-1295] - Duplicate include in TSocket.cpp
+ * [THRIFT-1294] - thrift: fix log message typos in TSimpleServer
+ * [THRIFT-1293] - thrift: improve handling of exceptions thrown by
+ * [THRIFT-1292] - thrift: silence log spew from TThreadedServer
+ * [THRIFT-1288] - Allow typedefed exceptions in throws clauses
+ * [THRIFT-1290] - thrift: TNonblockingServer: clean up state in the
+ * [THRIFT-1287] - thrift: start refactoring some of the C++ processor
+ * [THRIFT-1289] - thrift: implement TNonblockingServer::stop()
+ * [THRIFT-1305] - thrift: make TConnection a private inner class of
+ * [THRIFT-1304] - TNonblockingServer: pass in the connection context to
+ * [THRIFT-1302] - thrift: raise an exception if send() times out in
+ * [THRIFT-1301] - thrift: consolidate common code in TNonblockingServer
+ * [THRIFT-1377] - abort PHP deserialization on unknown field type
+ * [THRIFT-1379] - fix uninitialized enum values in thrift C++ objects
+ * [THRIFT-1376] - Make port specification option in thrift remote
+ * [THRIFT-1375] - fixed a hex char conversion bug in TJSONProtocol
+ * [THRIFT-1373] - Fix user-defined exception generation in thrift (python)
+ * [THRIFT-1361] - Optional replacement of pthread by boost::thread
+ * [THRIFT-1320] - Consistency of configure generated config.h
+ * [THRIFT-1317] - Remove copy constructibility from
+ * [THRIFT-1316] - thrift: update server classes to accept
+ * [THRIFT-1315] - thrift: generate server interface factory classes
+ * [THRIFT-1314] - thrift: add TProcessorFactory
+ * [THRIFT-1335] - Add accept timeout to TServerSocket
+ * [THRIFT-1334] - Add more info to IllegalStateException
+ * [THRIFT-1333] - Make RWGuard not copyable
+ * [THRIFT-1332] - TSSLTransportParameters class uses hard coded value keyManagerType: SunX509
+ * [THRIFT-1251] - Generated java code should indicate which fields are required and which are optional
+ * [THRIFT-1387] - Build MSVC libraries with Boost Threads instead of Pthreads
+ * [THRIFT-1339] - Extend Tuple Protocol to TUnions
+ * [THRIFT-1031] - Patch to compile Thrift for vc++ 9.0 and 10.0
+ * [THRIFT-1130] - Add the ability to specify symbolic default value for optional boolean
+ * [THRIFT-1123] - Patch to compile Thrift server and client for vc++ 9.0 and 10.0
+ * [THRIFT-386] - Make it possible to build the Python library without the extension
+
+## New Feature
+ * [THRIFT-1401] - JSON-protocol for Delphi XE Libraries
+ * [THRIFT-1167] - Java nonblocking server with more than one thread for select and handling IO
+ * [THRIFT-1366] - Delphi generator, library and unit test.
+ * [THRIFT-1354] - Add rake task to build just the gem file
+ * [THRIFT-769] - Pluggable Serializers
+
+## Sub-task
+ * [THRIFT-1415] - delphi: add version Info to the library
+ * [THRIFT-1391] - Improved Delphi XE test cases
+
+
+
+Thrift 0.7
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1140] - Framed Transport Client using C (Glib) Library hangs when connecting to Ruby Server
+ * [THRIFT-1154] - HttpClient does not specify the connection close parameter
+ * [THRIFT-1153] - HttpClient does not specify the connection close parameter
+ * [THRIFT-1149] - Nonblocking server fails when client connection is reset
+ * [THRIFT-1146] - Android Incompatibility: in Android < 2.3, java.io.IOException doesn't support a Throwable parameter in the constructor
+ * [THRIFT-1133] - Java and JavaScript tutorial is broken since we have Java maven deployment
+ * [THRIFT-1132] - Deserialization error in TApplicationException C#
+ * [THRIFT-1131] - C# JSON Protocol is unable to decode escaped characters in string
+ * [THRIFT-1208] - python TCompactProtocol.py writeBool and readBool do not follow the compact-proto-spec-2.txt spec for CONTAINER_WRITE, CONTAINER_READ
+ * [THRIFT-1200] - JS compiler generates code that clobbers existing namespaces
+ * [THRIFT-1183] - Pure-ruby CompactProtocol raises ArgumentError when deserializing under Ruby 1.9
+ * [THRIFT-1182] - Native deserializer segfaults on incorrect list element type
+ * [THRIFT-1181] - AS3 compiler generates incorrect code for setting default values in constructor
+ * [THRIFT-1234] - thrift --help is missing doc on py:utf8strings
+ * [THRIFT-1180] - AS3 compiler generates uncompilable code for binary types.
+ * [THRIFT-1194] - Java lib does not install artifacts to local dir correctly
+ * [THRIFT-1193] - Potential infinite loop in nonblocking_server
+ * [THRIFT-1192] - Typo: TProtocol.h tests for HAVE_SYS_PARAM_H_
+ * [THRIFT-1190] - readBufferBytesAllocated in TNonblockingServer.java should be AtomicLong to fix FD leakage and general server malfunction
+ * [THRIFT-1187] - nonblocking_server shutdown race under Ruby 1.9
+ * [THRIFT-1178] - Java: TBase signature should be T extends TBase
+ * [THRIFT-1164] - Segmentation fault on NULL pointer in t_js_generator::generate_const
+ * [THRIFT-1171] - Perl write/readDouble assumes little-endian platform
+ * [THRIFT-1222] - Unhandled exception for TEvhttpServer request
+ * [THRIFT-1220] - TProcessor::process never returns false
+ * [THRIFT-1285] - Stable 0.7.0 Windows compiler exe available on the website is not the right one
+ * [THRIFT-1218] - c_glib uses wrong name in pkg-config
+ * [THRIFT-1215] - Undefined property Thirft in lib/js/thrift.js
+ * [THRIFT-1211] - When using THttpClient, non 200 responses leave the connection open
+ * [THRIFT-1228] - The php accelerator module calls flush incorrectly
+ * [THRIFT-1308] - libfb303-0.7.0.jar missing in maven repository
+ * [THRIFT-1255] - Mismatch of method name between JavaME's lib and generated code (compareTo/compareObjects)
+ * [THRIFT-1253] - Code generated for maps is not compiling
+ * [THRIFT-1252] - Segfault in Ruby deserializer
+ * [THRIFT-1094] - bug in TCompactProto python readMessageEnd method and updated test cases
+ * [THRIFT-1093] - several bugs in python TCompactProtocol
+ * [THRIFT-1092] - generated validate() method has wrong indentation
+ * [THRIFT-1011] - Error generating package imports when using classes from other packages
+ * [THRIFT-1050] - Declaring an argument named "manager" to a service method produces code that fails compile due to name conflicts with protected ivars in TAsyncClient
+ * [THRIFT-1074] - .keystore and .truststore are missing from the 0.6.0 distribution
+ * [THRIFT-1067] - Tons of bugs in php implementation
+ * [THRIFT-1065] - Unexpected exceptions not properly handled on JS
+ * [THRIFT-1076] - Erlang Thrift socket server has a bug that causes java thrift client of framed binary client to throw "out of sequence" exception
+ * [THRIFT-1057] - casts in TBinaryProtocol.tcc causing "dereferencing type-punned pointer will break strict-aliasing rules" warnings from gcc
+ * [THRIFT-1055] - csharp TServerSocket and TSocket do not disable Nagle via Socket.NoDelay = true like cpp and java do
+ * [THRIFT-1054] - explicit call to PKG_PROG_PKG_CONFIG is missing and first use of PKG_CHECK_MODULES may not happen, causes mono detection to fail
+ * [THRIFT-1117] - JavaScript Unit Test does not work anymore because libthrift*.jar were moved by Maven Deployment
+ * [THRIFT-1111] - The HTML generator does not distinguish between string and binary types
+ * [THRIFT-1032] - "make dist" fails due to c_glib problem
+ * [THRIFT-1036] - Auto-generated C++ code fails to compile with "-Werror -Wextra -Wall" g++ compiler flags
+ * [THRIFT-1041] - TDeserializer holds onto a reference of the array it reads after it is done deserializing
+ * [THRIFT-1106] - C++ code TAsyncProtocolProcessor.h & TAsyncBufferProcessor.h don't have virtual functions but no virtual destructor. Causes warnings on -Wall
+ * [THRIFT-1105] - OCaml generator does not prefix methods of included structs with their type
+ * [THRIFT-1104] - INSTALLDIRS should be included in configure script
+ * [THRIFT-1102] - typo in configure.ac: "==" operator in 'test' (instead of "=")
+ * [THRIFT-1101] - bytebuffer length calculation in TBinaryProtocol writeBinary
+ * [THRIFT-1098] - Undefined properties in TBinaryProtocolFactory
+ * [THRIFT-1081] - PHP tests broken and somewhat incomplete
+ * [THRIFT-1080] - erlang test's 'make' fails on Mac OSX
+ * [THRIFT-1078] - ThriftTest.thrift generates invalid PHP library
+ * [THRIFT-1120] - proto.WriteListEnd being called in the wrong place
+ * [THRIFT-1119] - TJSONProtocol fails to UTF8 decode strings
+ * [THRIFT-867] - PHP accelerator module's output transport is incompatible with TFramedTransport
+ * [THRIFT-826] - PHP TSocket Write Timeout
+ * [THRIFT-835] - Bad AS3 syntax in constructors that set default values
+ * [THRIFT-788] - thrift_protocol.so: multiget/multiget_slice does not handle more than 17 keys correctly
+ * [THRIFT-125] - OCaml libraries don't compile with 32-bit ocaml
+ * [THRIFT-342] - PHP: can't have sets of complex types
+ * [THRIFT-731] - configure doesn't check for ant >= 1.7
+ * [THRIFT-690] - Update TApplicationException codes
+ * [THRIFT-638] - BufferedTransport + C extensions block until recv timeout is reached on last fread call
+
+## Dependency upgrade
+ * [THRIFT-1177] - Update thrift to reflect changes in Go's networking libraries
+
+## Improvement
+ * [THRIFT-1155] - Remove log4j dependency from java client
+ * [THRIFT-1151] - Produce more informative runtime error in case of schema and data mismatch during serialization
+ * [THRIFT-1207] - Support DESTDIR on "make install" of ruby libs
+ * [THRIFT-1199] - Union structs should have generated methods to test whether a specific field is currently set
+ * [THRIFT-1233] - Remove unused include in generated C++ code
+ * [THRIFT-1189] - Ruby deserializer speed improvements
+ * [THRIFT-1170] - Thrift Generated Code and Java 5
+ * [THRIFT-1174] - Publish as3 client implementation via Maven for use by flex-mojos users
+ * [THRIFT-1225] - TCompactProtocol for PHP
+ * [THRIFT-1221] - Remove SimpleCallback.h
+ * [THRIFT-1217] - Use evutil_socketpair instead of pipe (Windows port)
+ * [THRIFT-1216] - build Java Library behind a proxy
+ * [THRIFT-1231] - Remove bogus include
+ * [THRIFT-1213] - Membuffer should provide a way to get back the buffer
+ * [THRIFT-1237] - Java fb303 missing some methods
+ * [THRIFT-1063] - Fix Erlang Tutorial Files
+ * [THRIFT-1053] - Make remote client's IP address available for all socket related transports
+ * [THRIFT-1109] - Deploy fb303 alongside libthrift to maven repo
+ * [THRIFT-1107] - improvement for compiler-generated python for 'None' object comparisons
+ * [THRIFT-1069] - Add command line option to prevent thrift from inserting gen-* directories
+ * [THRIFT-1049] - Allow for TServerSocket python library to bind to a specific host
+ * [THRIFT-1126] - Extending struct_info for erlang bindings
+ * [THRIFT-1100] - python TSSLSocket improvements, including certificate validation
+ * [THRIFT-994] - Don't try to invoke phpize if we don't have it
+ * [THRIFT-993] - Some improvements in C++ stubs for oneway operations
+ * [THRIFT-997] - Using valueOf for base types in getFieldValue
+ * [THRIFT-418] - Don't do runtime sorting of struct fields
+ * [THRIFT-151] - TSSLServerSocket and TSSLSocket implementation
+ * [THRIFT-27] - Generated erlang types don't contain default values for records
+ * [THRIFT-113] - to-string methods should omit optional null fields from output
+ * [THRIFT-363] - Maven Deploy
+ * [THRIFT-447] - Make an abstract base Client class so we can generate less code
+ * [THRIFT-627] - should c++ have setters for optional fields?
+
+## New Feature
+ * [THRIFT-1236] - Erlang Reconnecting Thrift Client
+ * [THRIFT-1021] - Framed transport support for OCaml
+ * [THRIFT-1068] - Python SSL Socket Support
+ * [THRIFT-1103] - TZlibTransport for python, a zlib compressed transport
+ * [THRIFT-1083] - Preforking python process pool server
+ * [THRIFT-999] - Add TForkingServer
+
+## Sub-task
+ * [THRIFT-1152] - Attributes from private to protected
+ * [THRIFT-1038] - Generated Java code for structures containing binary fields (or collections thereof) are not serializable (in the Java sense) even though they implement java.io.Serializable
+
+## Task
+ * [THRIFT-892] - Refactor erlang build system with rebar
+
+## Wish
+ * [THRIFT-625] - Add support for 'Go'
+
+
+
+Thrift 0.6.1
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1133] - Java and JavaScript tutorial is broken since we have Java maven deployment
+ * [THRIFT-1131] - C# JSON Protocol is unable to decode escaped characters in string
+ * [THRIFT-1074] - .keystore and .truststore are missing from the 0.6.0 distribution
+
+## Improvement
+ * [THRIFT-1109] - Deploy fb303 alongside libthrift to maven repo
+ * [THRIFT-363] - Maven Deploy
+
+## Question
+ * [THRIFT-1206] - did the THRIFT 0.6.1 merge THRIFT-563?
+
+## Sub-task
+ * [THRIFT-1163] - How can I use multiple services in one program?
+
+## Task
+ * [THRIFT-1112] - Apply THRIFT-363 to 0.6 branch
+ * [THRIFT-1113] - Apply THRIFT-1074 to 0.6 branch
+
+
+
+Thrift 0.6
+--------------------------------------------------------------------------------
+## Bug
+ * [THRIFT-1020] - OCaml compiler generates invalid OCaml
+ * [THRIFT-1015] - TUnion does not handle ByteBuffer in toString
+ * [THRIFT-1013] - generated java code may have name clashes with thrift library
+ * [THRIFT-1009] - TUnion does not correctly deep copy a ByteBuffer
+ * [THRIFT-1032] - "make dist" fails due to c_glib problem
+ * [THRIFT-868] - Referencing constant values doesn't work with typedef types
+ * [THRIFT-971] - java module can't be compiled without ivy and network connection
+ * [THRIFT-970] - Under heavy load, THttpClient may fail with "too many open files"
+ * [THRIFT-969] - Java Tutorial broken, move CalculatorHandler to a separate file
+ * [THRIFT-807] - JavaScript: Initialization of Base Types with 0 instead of null
+ * [THRIFT-955] - Thrift compiler for Windows uses lowercase names and directories which is inconsistent with compiling on other platforms
+ * [THRIFT-992] - Naming convention in C# constructor is not consistent with other fields and causes compile errors
+ * [THRIFT-1008] - byte[] accessors throw NPE on unset field
+ * [THRIFT-1006] - Impossible to correctly qualify an enum constant in an external thrift file
+ * [THRIFT-950] - Haskell bindings treat 'byte' as unsigned 8-bit int (Data.Word.Word8), java/cpp as signed (byte/int8_t).
+ * [THRIFT-975] - lib/c_glib/README is missing => breaks make dist
+ * [THRIFT-944] - Support all version-4s of base
+ * [THRIFT-939] - optional binary fields throw NPE on default byte[] getters
+ * [THRIFT-935] - PHP Extension aborts the build if php-config is not installed
+ * [THRIFT-933] - Haskell's Thrift.cabal has warnings
+ * [THRIFT-932] - Haskell tests need to be run through 'make check' (and probably 'cabal check') too
+ * [THRIFT-904] - C# TSocket should disable nagle and linger
+ * [THRIFT-941] - Make PHP C Extension use the defined Protocol writeMessageBegin function
+ * [THRIFT-940] - 'make check' fails if boost is not in the std include and link paths
+ * [THRIFT-924] - Fix generated php structure constants
+ * [THRIFT-979] - ruby bindings used to work on jruby
+ * [THRIFT-977] - Hex Conversion Bug in C++ TJSONProtocol
+ * [THRIFT-347] - PHP TSocket Timeout Issues
+ * [THRIFT-517] - TExceptions thrown by server result in cryptic error message on client - Tried to read 4 bytes, but only got 0 bytes
+
+## Improvement
+ * [THRIFT-1024] - Add Python Twisted example to the Tutorial
+ * [THRIFT-958] - Change access modifier on trans_ field in the FrameBuffer class to public.
+ * [THRIFT-957] - THsHaServer: Change access modifier of the invoker field.
+ * [THRIFT-1002] - CodeStyle: t_c_glib_generator.cc
+ * [THRIFT-1005] - Give unions byte[] signature methods to go along with their ByteBuffer counterparts
+ * [THRIFT-951] - Add a new isServing() method to TServer
+ * [THRIFT-943] - Silly readme typo fix.
+ * [THRIFT-961] - JavaScript TestSuite using ant/ivy and Java's ServerTestBase Handler
+ * [THRIFT-960] - add TestServer, TestNonblockingServer and TestClient again
+ * [THRIFT-949] - Modify the TEnum interface so it defines a method similar to findByValue
+ * [THRIFT-946] - Augment FieldValueMetaData so it differentiates 'string' and 'binary' fields.
+ * [THRIFT-903] - custom ThreadFactory in THsHaServer
+ * [THRIFT-913] - Test Case for Url encoded strings + simple enhancement to lib/js/test/RunTestServer.sh
+ * [THRIFT-926] - Miscellaneous C++ improvements
+ * [THRIFT-929] - Improvements to the C++ test suite
+ * [THRIFT-893] - add JavaScript to the tutorial examples
+ * [THRIFT-1003] - Polishing c_glib code
+ * [THRIFT-71] - Debian packaging for thrift
+
+## New Feature
+ * [THRIFT-1033] - Node.js language target
+ * [THRIFT-947] - Provide a helper method to determine the TProtocol used to serialize some data.
+ * [THRIFT-928] - Make more statistics available in C++ servers
+ * [THRIFT-922] - Templatized [de]serialization code for C++
+ * [THRIFT-923] - Event-driven client and server support for C++
+ * [THRIFT-925] - Provide name<->value map for enums in C++
+ * [THRIFT-927] - Add option to modify the PHP include path
+ * [THRIFT-377] - TFileTransport port in Java
+ * [THRIFT-106] - TSSLServerSocket
+ * [THRIFT-582] - C implementation of Thrift
+ * [THRIFT-745] - Make it easier to instantiate servers
+
+## Sub-task
+ * [THRIFT-1038] - Generated Java code for structures containing binary fields (or collections thereof) are not serializable (in the Java sense) even though they implement java.io.Serializable
+
+## Task
+ * [THRIFT-862] - Async client issues / improvements
+
+## Test
+ * [THRIFT-581] - Add a testsuite for txThrift (Twisted)
+
+
+
+Thrift 0.5.0 - Incubating
+--------------------------------------------------------------------------------
+THRIFT-505 Build Make configure give a summary of the enabled components (David Reiss)
+THRIFT-506 Build Allow Thrift to be built without the C++ library (David Reiss)
+THRIFT-844 Build Build Requirements state autoconf 2.59+ is required, but 2.60+ is needed (Harlan Lieberman-Berg)
+THRIFT-850 Build Perl runtime requires Bit::Vector which may not be installed by default, but configure does not fail (Michael Lum)
+THRIFT-854 Build Provide configure option and make rules to build/install php extension (Anthony Molinaro)
+THRIFT-858 Build Have bootstrap.sh check for a suitable autoconf version before running (David Reiss)
+THRIFT-871 Build Thrift compiler for Windows (binary distribution) (David Reiss)
+THRIFT-323 C# TJSONProtocol (Roger Meier)
+THRIFT-634 C# C# Compiler Generates Incorrect Code For Fields which begin with an uppercase letter (Jon S Akhtar)
+THRIFT-881 C# add csharp to the tutorial (Roger Meier)
+THRIFT-856 C++ Building cpp library fails on OS X with malloc and free not being declared in scope (James Clarke)
+THRIFT-865 C++ C++ compiler build depends on libfl even when flex/lex not detected (David Reiss)
+THRIFT-900 C++ Unix domain socket (Roger Meier)
+THRIFT-920 C++ C++ Test and Tutorial does not compile anymore due to the change within Enum handling (Roger Meier)
+THRIFT-567 C++ Can't immediately stop a TSimpleServer thread that is idle (Rush Manbert)
+THRIFT-756 C++ Exposing TSocket(int) constructor to public (Rajat Goel)
+THRIFT-798 C++ TNonblockingServer leaks resources when destroyed (David Reiss)
+THRIFT-812 C++, Python Demo of Thrift over ZeroMQ (David Reiss)
+THRIFT-629 Cocoa Unused Field In TSocketServer Appears To Break iPhone Build (Jon S Akhtar)
+THRIFT-838 Cocoa Generated Cocoa classes have useless @dynamic declarations (Kevin Ballard)
+THRIFT-805 Cocoa Don't generate process_XXXX methods for oneway methods (Brad Taylor)
+THRIFT-507 Compiler Remove the compiler's dependency on Boost (David Reiss)
+THRIFT-895 Compiler (General) Thrift compiler does not allow two different enumerations to have the same key name for one of the enum values (David Reiss)
+THRIFT-852 Compiler (General) Missing newline causes many compiler warnings (Anthony Molinaro)
+THRIFT-877 Compiler (General) smalltalk namespace doesn't work (Bruce Lowekamp)
+THRIFT-897 Compiler (General) Don't allow unqualified constant access to enum values (Bryan Duxbury)
+THRIFT-9 Compiler (General) Add a default namespace declaration for all languages (David Reiss)
+THRIFT-599 Erlang Don't use unnecessary processes in the Erlang transports and clients (David Reiss)
+THRIFT-646 Erlang Erlang library is missing install target (David Reiss)
+THRIFT-698 Erlang Generated module list should contain atoms, not strings (Anthony Molinaro)
+THRIFT-866 Erlang term() in spec definitions seems to not work in erlang R12 (Anthony Molinaro)
+THRIFT-886 Erlang Dialyzer warning (Anthony Molinaro)
+THRIFT-785 Erlang Framed transport server problems (Anthony Molinaro)
+THRIFT-884 HTML HTML Generator: add Key attribute to the Data Types Tables (Roger Meier)
+THRIFT-652 Haskell Generated field name for struct is not capitalized correctly (Christian Lavoie)
+THRIFT-743 Haskell compile error with GHC 6.12.1 (Christian Lavoie)
+THRIFT-901 Haskell Allow the bindings to compile without -fglasgow-exts and with -Wall -Werror (Christian Lavoie)
+THRIFT-905 Haskell Make haskell thrift bindings use automake to compile and install (Christian Lavoie)
+THRIFT-906 Haskell Improve type mappings (Christian Lavoie)
+THRIFT-914 Haskell Make haskell bindings 'easily' compilable (Christian Lavoie)
+THRIFT-918 Haskell Make haskell tests run again (Christian Lavoie)
+THRIFT-919 Haskell Update Haskell bindings README (Christian Lavoie)
+THRIFT-787 Haskell Enums are not read correctly (Christian Lavoie)
+THRIFT-250 Java ExecutorService as a constructor parameter for TServer (Ed Ceaser)
+THRIFT-693 Java Thrift compiler generated java code that throws compiler warnings about deprecated methods. (Bryan Duxbury)
+THRIFT-843 Java TNonblockingSocket connects without a timeout (Bryan Duxbury)
+THRIFT-845 Java async client does not respect timeout (Ning Liang)
+THRIFT-870 Java Java constants don't get Javadoc comments (Bryan Duxbury)
+THRIFT-873 Java Java tests fail due to Too many open files (Todd Lipcon)
+THRIFT-876 Java Add SASL support (Aaron T. Myers)
+THRIFT-879 Java Remove @Override from TUnion.clear (Dave Engberg)
+THRIFT-882 Java deep copy of binary fields does not copy ByteBuffer characteristics (arrayOffset, position) (Bryan Duxbury)
+THRIFT-888 Java async client should also have nonblocking connect (Eric Jensen)
+THRIFT-890 Java Java tutorial doesn't work (Todd Lipcon)
+THRIFT-894 Java Make default accessors for binary fields return byte[]; provide new accessors to get ByteBuffer version (Bryan Duxbury)
+THRIFT-896 Java TNonblockingSocket.isOpen() returns true even after close() (Eric Jensen)
+THRIFT-907 Java libfb303 doesn't compile in 0.4.0 (Todd Lipcon)
+THRIFT-912 Java Improvements and bug fixes to SASL implementation (Todd Lipcon)
+THRIFT-917 Java THsHaServer should not accept an ExecutorService without catching RejectedExecutionException (Ed Ceaser)
+THRIFT-931 Java Use log4j for Java tests (Todd Lipcon)
+THRIFT-880 JavaME JavaME code generator and runtime library (Dave Engberg)
+THRIFT-846 JavaScript JavaScript Test Framework: extended Testcases (Roger Meier)
+THRIFT-885 JavaScript Url encoded strings never get decoded? How do we fix this? (T Jake Luciani)
+THRIFT-911 JavaScript (JavaScript compiler) Const structs, maps, sets, and lists generate a trailing comma (T Jake Luciani)
+THRIFT-860 OCaml copy method and reset method (Lev Walkin)
+THRIFT-682 PHP PHP extension doesn't compile on Mac OS X (Bryan Duxbury)
+THRIFT-851 PHP php extension fails to compile on centos 5.x (Todd Lipcon)
+THRIFT-840 Perl Perl protocol handler could be more robust against unrecognised types (Conrad Hughes)
+THRIFT-758 Perl incorrect dereference in exception handling (Yann Kerherve)
+THRIFT-257 Python Support validation of required fields (Esteve Fernandez)
+THRIFT-335 Python Compact Protocol for Python (David Reiss)
+THRIFT-596 Python Make Python's TBufferedTransport use a configurable input buffer (David Reiss)
+THRIFT-597 Python Python THttpServer performance improvements (David Reiss)
+THRIFT-598 Python Allow Python's threading servers to use daemon threads (David Reiss)
+THRIFT-666 Python Allow the handler to override HTTP responses in THttpServer (David Reiss)
+THRIFT-673 Python Generated Python code has whitespace issues (Ian Eure)
+THRIFT-721 Python THttpClient ignores url parameters (Thomas Kho)
+THRIFT-824 Python TApplicationException.__str__() refers to class constants as globals (Peter Schuller)
+THRIFT-855 Python Include optimized compiled python objects in install (Anthony Molinaro)
+THRIFT-859 Python Allow py:twisted to be generated in different namespace than py (Bruce Lowekamp)
+THRIFT-869 Python TSocket.py on Mac (and FreeBSD) doesn't handle ECONNRESET from recv() (Steven Knight)
+THRIFT-875 Python Include python setup.cfg in dist (Anthony Molinaro)
+THRIFT-610 Ruby binary_protocol.rb segfaults [line 86] (Unassigned)
+THRIFT-899 Ruby Ruby read timeouts can sometimes be 2x what they should be (Ryan King)
+THRIFT-909 Ruby allow block argument to struct constructor (Michael Stockton)
+THRIFT-456 Test Suite Bad IP address string in test/cpp/src/main.cpp (Rush Manbert)
+
+
+Thrift 0.4.0 - Incubating
+--------------------------------------------------------------------------------
+THRIFT-650 Build Make Check fails on Centos/OSX with 0.2.0 tarball (Anthony Molinaro)
+THRIFT-770 Build Get 'make dist' to work without first compiling source code (Anthony Molinaro)
+THRIFT-160 C# Created THttpTransport for the C# library based on WebHttpRequest (Michael Greene)
+THRIFT-834 C# THttpClient resends contents of message after transport errors (Anatoly Fayngelerin)
+THRIFT-247 C++ THttpServer Transport (Unassigned)
+THRIFT-676 C++ Change C++ code generator so that generated classes can be wrapped with SWIG (Unassigned)
+THRIFT-570 Compiler Thrift compiler does not error when duplicate method names are present (Bruce Simpson)
+THRIFT-808 Compiler Segfault when constant declaration references a struct field that doesn't exist (Bryan Duxbury)
+THRIFT-646 Erlang Erlang library is missing install target (Anthony Molinaro)
+THRIFT-544 General multiple enums with the same key generate invalid code (Ben Taitelbaum)
+THRIFT-434 General ruby compiler should warn when a reserved word is used (Michael Stockton)
+THRIFT-799 General Files missing proper Apache license header (Bryan Duxbury)
+THRIFT-832 HTML HTML generator shows unspecified struct fields as 'required' (Bryan Duxbury)
+THRIFT-226 Java Collections with binary keys or values break equals() (Bryan Duxbury)
+THRIFT-484 Java Ability to use a slice of a buffer instead of a direct byte[] for binary fields (Bryan Duxbury)
+THRIFT-714 Java maxWorkerThreads parameter to THsHaServer has no effect (Bryan Duxbury)
+THRIFT-751 Java Add clear() method to TBase (Bryan Duxbury)
+THRIFT-765 Java Improved string encoding and decoding performance (Bryan Duxbury)
+THRIFT-768 Java Async client for Java (Bryan Duxbury)
+THRIFT-774 Java TDeserializer should provide a partialDeserialize method for primitive types (Piotr Kozikowski)
+THRIFT-783 Java .equals java method is broken on structs containing binary-type fields (Unassigned)
+THRIFT-804 Java CompareTo is broken for unions set to map, set, or list (Bryan Duxbury)
+THRIFT-814 Java Include a TServlet in the standard Thrift distribution (Mathias Herberts)
+THRIFT-818 Java Async client doesn't send method args (Bryan Duxbury)
+THRIFT-830 Java Switch binary field implementation from byte[] to ByteBuffer (Bryan Duxbury)
+THRIFT-831 Java FramedTransport implementation that reuses its buffers (Bryan Duxbury)
+THRIFT-833 Java build.xml in lib/java is missing a classpathref attribute for the javadoc task (Bryan Duxbury)
+THRIFT-836 Java Race condition causes CancelledKeyException in TAsyncClientManager (Bryan Duxbury)
+THRIFT-842 Java Upgrade to current version of commons-lang (2.5 instead of 2.4) and/or change dependency in ivy.xml to not be exact (Bryan Duxbury)
+THRIFT-815 JavaScript Deserialization of lists is critically broken. (T Jake Luciani)
+THRIFT-827 OCaml OCaml generator to take default values into account (Lev Walkin)
+THRIFT-647 PHP PHP library is missing install target (Anthony Molinaro)
+THRIFT-682 PHP PHP extension doesn't compile on Mac OS X (Bryan Duxbury)
+THRIFT-718 PHP Thrift PHP library includes closing tags and extraneous whitespace (Nicholas Telford)
+THRIFT-778 PHP PHP socket listening server (Nick Jones)
+THRIFT-780 PHP PHP extension sometimes causes an abort with two exceptions at the same time (David Reiss)
+THRIFT-837 PHP PHP accelerator bug for writes > 8k (Thomas Kho)
+THRIFT-782 Perl Perl code for writing containers doesn't count length of write*Begin or write*End (Conrad Hughes)
+THRIFT-395 Python Python library + compiler does not support unicode strings (Unassigned)
+THRIFT-133 Ruby 'namespace ruby' should error out, or be an alias to 'namespace rb' (Bryan Duxbury)
+THRIFT-664 Ruby Ruby extension fails to build with Ruby 1.9.1 (Rajesh Malepati)
+THRIFT-699 Ruby Excise unused "native protocol method table" stuff from thrift_native (Bryan Duxbury)
+THRIFT-767 Ruby ruby compiler does not keep comments for enum values (Bryan Duxbury)
+THRIFT-811 Ruby http_client_transport.rb: allow custom http headers (Tony Kamenick)
+THRIFT-459 Ruby Ruby installation always tries to write to /Library/Ruby/site (Matthieu Imbert)
+
+
+Thrift 0.1.0 - Incubating (not released)
+--------------------------------------------------------------------------------
+Compatibility Breaking Changes:
+  C++:
+    * It's quite possible that regenerating code and rebuilding will be
+      required.  Make sure your headers match your libs!
+ + Java: + + Python: + + Ruby: + * Generated files now have underscored names [THRIFT-421] + * The library has been rearranged to be more Ruby-like [THRIFT-276] + + Erlang: + * Generated code will have to be regenerated, and the new code will + have to be deployed atomically with the new library code [THRIFT-136] + +New Features and Bug Fixes: + C++: + * Support for TCompactProtocol [THRIFT-333] + + Java: + * Support for TCompactProtocol [THRIFT-110] + + Python: + * Support for Twisted [THRIFT-148] + + Ruby: + * Support for TCompactProtocol [THRIFT-332] + diff --git a/vendor/github.com/apache/thrift/CMakeLists.txt b/vendor/github.com/apache/thrift/CMakeLists.txt new file mode 100644 index 0000000000..93ed8d2ac5 --- /dev/null +++ b/vendor/github.com/apache/thrift/CMakeLists.txt @@ -0,0 +1,117 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +cmake_minimum_required(VERSION 2.8.12) + +project("Apache Thrift") + +set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH}" "${CMAKE_CURRENT_SOURCE_DIR}/build/cmake") + +# TODO: add `git rev-parse --short HEAD` +# Read the version information from the Autoconf file +file (STRINGS "${CMAKE_CURRENT_SOURCE_DIR}/configure.ac" CONFIGURE_AC REGEX "AC_INIT\\(.*\\)" ) + +# The following variable is used in the version.h.in file +string(REGEX REPLACE "AC_INIT\\(\\[.*\\], \\[([0-9]+\\.[0-9]+\\.[0-9]+(-dev)?)\\]\\)" "\\1" PACKAGE_VERSION ${CONFIGURE_AC}) +message(STATUS "Parsed Thrift package version: ${PACKAGE_VERSION}") + +# These are internal to CMake +string(REGEX REPLACE "([0-9]+\\.[0-9]+\\.[0-9]+)(-dev)?" 
"\\1" thrift_VERSION ${PACKAGE_VERSION}) +string(REGEX REPLACE "([0-9]+)\\.[0-9]+\\.[0-9]+" "\\1" thrift_VERSION_MAJOR ${thrift_VERSION}) +string(REGEX REPLACE "[0-9]+\\.([0-9])+\\.[0-9]+" "\\1" thrift_VERSION_MINOR ${thrift_VERSION}) +string(REGEX REPLACE "[0-9]+\\.[0-9]+\\.([0-9]+)" "\\1" thrift_VERSION_PATCH ${thrift_VERSION}) +message(STATUS "Parsed Thrift version: ${thrift_VERSION} (${thrift_VERSION_MAJOR}.${thrift_VERSION_MINOR}.${thrift_VERSION_PATCH})") + +# Some default settings +include(DefineCMakeDefaults) + +# Build time options are defined here +include(DefineOptions) +include(DefineInstallationPaths) + +# Based on the options set some platform specifics +include(DefinePlatformSpecifc) + +# Generate the config.h file +include(ConfigureChecks) + +# Package it +include(CPackConfig) + + +find_package(Threads) + +include(CTest) +if(BUILD_TESTING) + message(STATUS "Building with unittests") + + enable_testing() + # Define "make check" as alias for "make test" + add_custom_target(check COMMAND ctest) +else () + message(STATUS "Building without tests") +endif () + +if(BUILD_COMPILER) + if(NOT EXISTS ${THRIFT_COMPILER}) + set(THRIFT_COMPILER $) + endif() + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/compiler/cpp) +elseif(EXISTS ${THRIFT_COMPILER}) + add_executable(thrift-compiler IMPORTED) + set_property(TARGET thrift-compiler PROPERTY IMPORTED_LOCATION ${THRIFT_COMPILER}) +endif() + +if(BUILD_CPP) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/cpp) + if(BUILD_TUTORIALS) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/tutorial/cpp) + endif() + if(BUILD_TESTING) + if(WITH_LIBEVENT AND WITH_ZLIB AND WITH_OPENSSL) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test/cpp) + else() + message(WARNING "libevent and/or ZLIB and/or OpenSSL not found or disabled; will not build some tests") + endif() + endif() +endif() + +if(BUILD_C_GLIB) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/c_glib) +endif() + +if(BUILD_JAVA) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/java) +endif() + +if(BUILD_PYTHON) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/py) + if(BUILD_TESTING) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test/py) + endif() +endif() + +if(BUILD_HASKELL) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lib/hs) + if(BUILD_TESTING) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/test/hs) + endif() +endif() + +PRINT_CONFIG_SUMMARY() diff --git a/vendor/github.com/apache/thrift/CONTRIBUTING.md b/vendor/github.com/apache/thrift/CONTRIBUTING.md new file mode 100644 index 0000000000..316da9a00d --- /dev/null +++ b/vendor/github.com/apache/thrift/CONTRIBUTING.md @@ -0,0 +1,49 @@ +## How to contribute + 1. Help to review and verify existing patches + 1. Make sure your issue is not all ready in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT) + 1. If not, create a ticket describing the change you're proposing in the [Jira issue tracker](http://issues.apache.org/jira/browse/THRIFT) + 1. Contribute your patch using one of the two methods below + +### Contributing via a patch + +1. Check out the latest version of the source code + + * git clone https://git-wip-us.apache.org/repos/asf/thrift.git thrift + +1. Modify the source to include the improvement/bugfix + + * Remember to provide *tests* for all submited changes + * When bugfixing: add test that will isolate bug *before* applying change that fixes it + * Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages) + +1. 
+### Contributing via GitHub pull requests
+
+1. Create a fork of http://github.com/apache/thrift
+1. Create a branch for your changes (best practice is to use the issue as the branch name, e.g. THRIFT-9999)
+1. Modify the source to include the improvement/bugfix
+
+  * Remember to provide *tests* for all submitted changes
+  * When bugfixing: add a test that will isolate the bug *before* applying the change that fixes it
+  * Verify that you follow [Thrift Coding Standards](/docs/coding_standards) (you can run 'make style', which ensures proper format for some languages)
+  * Verify that your change works on other platforms by adding a GitHub service hook to [Travis CI](http://docs.travis-ci.com/user/getting-started/#Step-one%3A-Sign-in) and [AppVeyor](http://www.appveyor.com/docs)
+
+1. Commit and push changes to your branch (please use the issue name and description as the commit title, e.g. THRIFT-9999 make it perfect)
+1. Issue a pull request with the Jira ticket number you are working on in its name
+1. Wait for other contributors or committers to review your new addition
+1. Wait for a committer to commit your patch
+
+### More info
+
+ Plenty of information on why and how to contribute is available on the Apache Software Foundation (ASF) web site. In particular, we recommend the following:
+
+ * [Contributors Tech Guide](http://www.apache.org/dev/contributors)
+ * [Get involved!](http://www.apache.org/foundation/getinvolved.html)
+ * [Legal aspects on Submission of Contributions (Patches)](http://www.apache.org/licenses/LICENSE-2.0.html#contributions) diff --git a/vendor/github.com/apache/thrift/Dockerfile b/vendor/github.com/apache/thrift/Dockerfile new file mode 100644 index 0000000000..0d7ad2175e --- /dev/null +++ b/vendor/github.com/apache/thrift/Dockerfile @@ -0,0 +1,61 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Goal: provide a thrift-compiler Docker image
+#
+# Usage:
+# docker run -v "${PWD}:/data" thrift/thrift-compiler -gen cpp -o /data/ /data/test/ThriftTest.thrift
+#
+# Further details on Docker for Thrift are in build/docker/.
+#
+# TODO: push to apache/thrift-compiler instead of thrift/thrift-compiler
+
+FROM debian:jessie
+MAINTAINER Apache Thrift
+
+ENV DEBIAN_FRONTEND noninteractive
+
+ADD . 
/thrift + +RUN buildDeps=" \ + flex \ + bison \ + g++ \ + make \ + cmake \ + curl \ + "; \ + apt-get update && apt-get install -y --no-install-recommends $buildDeps \ + && mkdir /tmp/cmake-build && cd /tmp/cmake-build \ + && cmake \ + -DBUILD_COMPILER=ON \ + -DBUILD_LIBRARIES=OFF \ + -DBUILD_TESTING=OFF \ + -DBUILD_EXAMPLES=OFF \ + /thrift \ + && cmake --build . --config Release \ + && make install \ + && curl -k -sSL "https://storage.googleapis.com/golang/go1.5.2.linux-amd64.tar.gz" -o /tmp/go.tar.gz \ + && tar xzf /tmp/go.tar.gz -C /tmp \ + && cp /tmp/go/bin/gofmt /usr/bin/gofmt \ + && apt-get purge -y --auto-remove $buildDeps \ + && apt-get clean \ + && rm -rf /tmp/* \ + && rm -rf /var/lib/apt/lists/* + +ENTRYPOINT ["thrift"] diff --git a/vendor/github.com/apache/thrift/Makefile.am b/vendor/github.com/apache/thrift/Makefile.am new file mode 100755 index 0000000000..ed58265ace --- /dev/null +++ b/vendor/github.com/apache/thrift/Makefile.am @@ -0,0 +1,131 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +ACLOCAL_AMFLAGS = -I ./aclocal + +if WITH_PLUGIN +# To enable bootstrap, build order is lib/cpp -> compiler -> others +SUBDIRS = lib/cpp compiler/cpp lib +if WITH_TESTS +SUBDIRS += lib/cpp/test +endif +else +SUBDIRS = compiler/cpp lib +endif + +if WITH_TESTS +SUBDIRS += test +endif + +if WITH_TUTORIAL +SUBDIRS += tutorial +endif + +dist-hook: + find $(distdir) -type f \( -iname ".DS_Store" -or -iname "._*" -or -iname ".gitignore" \) | xargs rm -rf + find $(distdir) -type d \( -iname ".deps" -or -iname ".libs" \) | xargs rm -rf + find $(distdir) -type d \( -iname ".svn" -or -iname ".git" \) | xargs rm -rf + +print-version: + @echo $(VERSION) + +.PHONY: precross cross +precross-%: all + $(MAKE) -C $* precross +precross: all precross-test precross-lib + +empty := +space := $(empty) $(empty) +comma := , + +CROSS_LANGS = @MAYBE_CPP@ @MAYBE_C_GLIB@ @MAYBE_D@ @MAYBE_JAVA@ @MAYBE_CSHARP@ @MAYBE_PYTHON@ @MAYBE_PY3@ @MAYBE_RUBY@ @MAYBE_HASKELL@ @MAYBE_PERL@ @MAYBE_PHP@ @MAYBE_GO@ @MAYBE_NODEJS@ @MAYBE_DART@ @MAYBE_ERLANG@ @MAYBE_LUA@ +CROSS_LANGS_COMMA_SEPARATED = $(subst $(space),$(comma),$(CROSS_LANGS)) + +if WITH_PY3 +CROSS_PY=$(PYTHON3) +else +CROSS_PY=$(PYTHON) +endif + +if WITH_PYTHON +crossfeature: precross + $(CROSS_PY) test/test.py --retry-count 3 --features .* --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) +else +# feature test needs python build +crossfeature: +endif + +cross-%: precross crossfeature + $(CROSS_PY) test/test.py --retry-count 3 --skip-known-failures --server $(CROSS_LANGS_COMMA_SEPARATED) --client $(CROSS_LANGS_COMMA_SEPARATED) --regex "$*" + +cross: cross-.* + +TIMES = 1 2 3 +fail: precross + $(CROSS_PY) test/test.py || true + $(CROSS_PY) test/test.py --update-expected-failures=overwrite + $(foreach 
var,$(TIMES),test/test.py -s || true;test/test.py --update-expected-failures=merge;) + +codespell_skip_files = \ + *.jar \ + *.class \ + *.so \ + *.a \ + *.la \ + *.o \ + *.p12 \ + *OCamlMakefile \ + .keystore \ + .truststore \ + CHANGES \ + config.sub \ + configure \ + depcomp \ + libtool.m4 \ + output.* \ + rebar \ + thrift + +skipped_files = $(subst $(space),$(comma),$(codespell_skip_files)) + +style-local: + codespell --write-changes --skip=$(skipped_files) --disable-colors + +EXTRA_DIST = \ + .clang-format \ + .editorconfig \ + .travis.yml \ + appveyor.yml \ + bower.json \ + build \ + CMakeLists.txt \ + composer.json \ + contrib \ + CONTRIBUTING.md \ + debian \ + doc \ + doap.rdf \ + package.json \ + sonar-project.properties \ + Dockerfile \ + LICENSE \ + CHANGES \ + NOTICE \ + README.md \ + Thrift.podspec diff --git a/vendor/github.com/apache/thrift/README.md b/vendor/github.com/apache/thrift/README.md new file mode 100644 index 0000000000..07cd32f098 --- /dev/null +++ b/vendor/github.com/apache/thrift/README.md @@ -0,0 +1,166 @@ +Apache Thrift +============= + ++[![Build Status](https://travis-ci.org/apache/thrift.svg?branch=master)](https://travis-ci.org/apache/thrift) +- +[![AppVeyor Build status](https://ci.appveyor.com/api/projects/status/e2qks7enyp9gw7ma?svg=true)](https://ci.appveyor.com/project/apache/thrift) + + +Introduction +============ + +Thrift is a lightweight, language-independent software stack with an +associated code generation mechanism for RPC. Thrift provides clean +abstractions for data transport, data serialization, and application +level processing. The code generation system takes a simple definition +language as its input and generates code across programming languages that +uses the abstracted stack to build interoperable RPC clients and servers. + +Thrift is specifically designed to support non-atomic version changes +across client and server code. + +For more details on Thrift's design and implementation, take a gander at +the Thrift whitepaper included in this distribution or at the README.md files +in your particular subdirectory of interest. + +Hierarchy +========= + +thrift/ + + compiler/ + + Contains the Thrift compiler, implemented in C++. + + lib/ + + Contains the Thrift software library implementation, subdivided by + language of implementation. + + cpp/ + go/ + java/ + php/ + py/ + rb/ + + test/ + + Contains sample Thrift files and test code across the target programming + languages. + + tutorial/ + + Contains a basic tutorial that will teach you how to develop software + using Thrift. + +Requirements +============ + +See http://thrift.apache.org/docs/install for an up-to-date list of build requirements. + +Resources +========= + +More information about Thrift can be obtained on the Thrift webpage at: + + http://thrift.apache.org + +Acknowledgments +=============== + +Thrift was inspired by pillar, a lightweight RPC tool written by Adam D'Angelo, +and also by Google's protocol buffers. + +Installation +============ + +If you are building from the first time out of the source repository, you will +need to generate the configure scripts. (This is not necessary if you +downloaded a tarball.) From the top directory, do: + + ./bootstrap.sh + +Once the configure scripts are generated, thrift can be configured. +From the top directory, do: + + ./configure + +You may need to specify the location of the boost files explicitly. 
+If you installed boost in /usr/local, you would run configure as follows:
+
+    ./configure --with-boost=/usr/local
+
+Note that by default the thrift C++ library is typically built with debugging
+symbols included. If you want to customize these options, use the CXXFLAGS
+option in configure, as such:
+
+    ./configure CXXFLAGS='-g -O2'
+    ./configure CFLAGS='-g -O2'
+    ./configure CPPFLAGS='-DDEBUG_MY_FEATURE'
+
+To enable gcov, which requires the options -fprofile-arcs and -ftest-coverage,
+enable them with:
+
+    ./configure --enable-coverage
+
+Run ./configure --help to see other configuration options.
+
+Please be aware that the Python library will ignore the --prefix option
+and just install wherever Python's distutils puts it (usually along
+the lines of /usr/lib/pythonX.Y/site-packages/). If you need to control
+where the Python modules are installed, set the PY_PREFIX variable.
+(DESTDIR is respected for Python and C++.)
+
+Make thrift:
+
+    make
+
+From the top directory, become superuser and do:
+
+    make install
+
+Note that some language packages must be installed manually using build tools
+better suited to those languages (at the time of this writing, this applies
+to Java, Ruby, PHP).
+
+Look for the README.md file in the lib/<language>/ folder for more details on
+the installation of each language library package.
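+As a quick sanity check of an installed compiler, you can feed it a tiny
+interface definition. This is an illustrative sketch only: the file name
+calc.thrift and the choice of the go generator are arbitrary, and any
+supported generator would do.
+
+    // calc.thrift - a minimal, hypothetical service definition
+    service Calculator {
+      i32 add(1: i32 a, 2: i32 b)
+    }
+
+Then generate code from it:
+
+    thrift --gen go calc.thrift
+
+The generated sources land in a gen-go/ directory next to the input file
+(gen-cpp/, gen-py/, and so on for other generators).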
+Testing
+=======
+
+There are a large number of client library tests that can all be run
+from the top-level directory.
+
+    make -k check
+
+This will make all of the libraries (as necessary), and run through
+the unit tests defined in each of the client libraries. If a single
+language fails, the make check will continue on and provide a synopsis
+at the end.
+
+To run the cross-language test suite, please run:
+
+    make cross
+
+This will run a set of tests that use different language clients and
+servers.
+
+License
+=======
+
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License. diff --git a/vendor/github.com/apache/thrift/Thrift.podspec b/vendor/github.com/apache/thrift/Thrift.podspec new file mode 100644 index 0000000000..2ead0e93df --- /dev/null +++ b/vendor/github.com/apache/thrift/Thrift.podspec @@ -0,0 +1,18 @@
+Pod::Spec.new do |s|
+  s.name = "Thrift"
+  s.version = "0.10.0"
+  s.summary = "Apache Thrift is a lightweight, language-independent software stack with an associated code generation mechanism for RPC."
+  s.description = <<-DESC
+The Apache Thrift software framework, for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages.
+  DESC
+  s.homepage = "http://thrift.apache.org"
+  s.license = { :type => 'Apache License, Version 2.0', :url => 'https://raw.github.com/apache/thrift/thrift-0.9.0/LICENSE' }
+  s.author = { "The Apache Software Foundation" => "apache@apache.org" }
+  s.requires_arc = true
+  s.ios.deployment_target = '7.0'
+  s.osx.deployment_target = '10.8'
+  s.ios.framework = 'CFNetwork'
+  s.osx.framework = 'CoreServices'
+  s.source = { :git => "https://github.com/apache/thrift.git", :tag => "thrift-0.10.0" }
+  s.source_files = 'lib/cocoa/src/**/*.{h,m,swift}'
+end
\ No newline at end of file
diff --git a/vendor/github.com/apache/thrift/appveyor.yml b/vendor/github.com/apache/thrift/appveyor.yml new file mode 100755 index 0000000000..03ee295435 --- /dev/null +++ b/vendor/github.com/apache/thrift/appveyor.yml @@ -0,0 +1,93 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# build Apache Thrift on AppVeyor - https://ci.appveyor.com
+
+shallow_clone: true
+clone_depth: 10
+
+version: '{build}'
+os:
+# - Windows Server 2012 R2
+- Visual Studio 2015
+
+environment:
+  BOOST_ROOT: C:\Libraries\boost_1_59_0
+  BOOST_LIBRARYDIR: C:\Libraries\boost_1_59_0\lib64-msvc-14.0
+  # Unfortunately, this version needs manual update because old versions are quickly deleted.
+  ANT_VERSION: 1.9.7
+
+install:
+- '"C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x64'
+- cd \
+  # Zlib
+- appveyor DownloadFile https://github.com/madler/zlib/archive/v1.2.8.tar.gz
+- 7z x v1.2.8.tar.gz -so | 7z x -si -ttar > nul
+- cd zlib-1.2.8
+- cmake -G "Visual Studio 14 2015 Win64" .
+- cmake --build . --config release
+- cd ..
+  # OpenSSL
+- C:\Python35-x64\python %APPVEYOR_BUILD_FOLDER%\build\appveyor\download_openssl.py
+- ps: Start-Process "Win64OpenSSL.exe" -ArgumentList "/silent /verysilent /sp- /suppressmsgboxes" -Wait
+  # Libevent
+- appveyor DownloadFile https://github.com/libevent/libevent/releases/download/release-2.0.22-stable/libevent-2.0.22-stable.tar.gz
+- 7z x libevent-2.0.22-stable.tar.gz -so | 7z x -si -ttar > nul
+- cd libevent-2.0.22-stable
+- nmake -f Makefile.nmake
+- mkdir lib
+- move *.lib lib\
+- move WIN32-Code\event2\* include\event2\
+- move *.h include\
+- cd .. 
+- appveyor-retry cinst -y winflexbison +- appveyor DownloadFile http://www.us.apache.org/dist/ant/binaries/apache-ant-%ANT_VERSION%-bin.zip +- 7z x apache-ant-%ANT_VERSION%-bin.zip > nul +- cd %APPVEYOR_BUILD_FOLDER% +# TODO: Enable Haskell build +# - cinst HaskellPlatform -version 2014.2.0.0 + + +build_script: +- set PATH=C:\ProgramData\chocolatey\bin;C:\apache-ant-%ANT_VERSION%\bin;%PATH% +- set JAVA_HOME=C:\Program Files\Java\jdk1.7.0 +- set PATH=%JAVA_HOME%\bin;%PATH% +# - set PATH=%PATH%;C:\Program Files (x86)\Haskell Platform\2014.2.0.0\bin +# - set PATH=%PATH%;C:\Program Files (x86)\Haskell Platform\2014.2.0.0\lib\extralibs\bin +- set PATH=C:\Python27-x64\scripts;C:\Python27-x64;%PATH% +- pip install ipaddress backports.ssl_match_hostname tornado twisted +- mkdir cmake-build +- cd cmake-build +- cmake -G "Visual Studio 14 2015 Win64" -DWITH_SHARED_LIB=OFF -DLIBEVENT_ROOT=C:\libevent-2.0.22-stable -DZLIB_INCLUDE_DIR=C:\zlib-1.2.8 -DZLIB_LIBRARY=C:\zlib-1.2.8\release\zlibstatic.lib -DBOOST_ROOT="%BOOST_ROOT%" -DBOOST_LIBRARYDIR="%BOOST_LIBRARYDIR%" .. +- findstr /b /e BUILD_COMPILER:BOOL=ON CMakeCache.txt +- findstr /b /e BUILD_CPP:BOOL=ON CMakeCache.txt +- findstr /b /e BUILD_JAVA:BOOL=ON CMakeCache.txt +- findstr /b /e BUILD_PYTHON:BOOL=ON CMakeCache.txt +# - findstr /b /e BUILD_C_GLIB:BOOL=ON CMakeCache.txt +# - findstr /b /e BUILD_HASKELL:BOOL=ON CMakeCache.txt +- findstr /b /e BUILD_TESTING:BOOL=ON CMakeCache.txt +# - cmake --build . +- cmake --build . --config Release +# TODO: Fix cpack +# - cpack +# TODO: Run more tests +# CTest fails to invoke ant seemingly due to "ant.bat" v.s. "ant" (shell script) conflict. +# Currently, everything that involves OpenSSL seems to hang forever on our Appveyor setup. +# Also a few C++ tests hang (on Appveyor or on Windows in general). +- ctest -C Release --timeout 600 -VV -E "(StressTestNonBlocking|PythonTestSSLSocket|python_test$|^Java)" +# TODO make it perfect ;-r diff --git a/vendor/github.com/apache/thrift/bootstrap.sh b/vendor/github.com/apache/thrift/bootstrap.sh new file mode 100755 index 0000000000..52ecda47b3 --- /dev/null +++ b/vendor/github.com/apache/thrift/bootstrap.sh @@ -0,0 +1,54 @@ +#!/bin/sh + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +./cleanup.sh +if test -d lib/php/src/ext/thrift_protocol ; then + if phpize -v >/dev/null 2>/dev/null ; then + (cd lib/php/src/ext/thrift_protocol && phpize) + fi +fi + +set -e + +# libtoolize is called "glibtoolize" on OSX. +if libtoolize --version 1 >/dev/null 2>/dev/null; then + LIBTOOLIZE=libtoolize +elif glibtoolize --version 1 >/dev/null 2>/dev/null; then + LIBTOOLIZE=glibtoolize +else + echo >&2 "Couldn't find libtoolize!" 
+ exit 1 +fi + +# we require automake 1.13 or later +# check must happen externally due to use of newer macro +AUTOMAKE_VERSION=`automake --version | grep automake | egrep -o '([0-9]{1,}\.)+[0-9]{1,}'` +if [ "$AUTOMAKE_VERSION" \< "1.13" ]; then + echo >&2 "automake version $AUTOMAKE_VERSION is too old (need 1.13 or later)" + exit 1 +fi + +autoscan +$LIBTOOLIZE --copy --automake +aclocal -I ./aclocal +autoheader +autoconf +automake --copy --add-missing --foreign diff --git a/vendor/github.com/apache/thrift/bower.json b/vendor/github.com/apache/thrift/bower.json new file mode 100644 index 0000000000..9ec59fcb5d --- /dev/null +++ b/vendor/github.com/apache/thrift/bower.json @@ -0,0 +1,16 @@ +{ + "name": "thrift", + "version": "0.10.0", + "homepage": "https://git-wip-us.apache.org/repos/asf/thrift.git", + "authors": [ + "Apache Thrift " + ], + "description": "Apache Thrift", + "main": "lib/js/src/thrift.js", + "keywords": [ + "thrift" + ], + "license": "Apache v2", + "ignore": [ + ] +} diff --git a/vendor/github.com/apache/thrift/cleanup.sh b/vendor/github.com/apache/thrift/cleanup.sh new file mode 100755 index 0000000000..f110721ac9 --- /dev/null +++ b/vendor/github.com/apache/thrift/cleanup.sh @@ -0,0 +1,89 @@ +#!/bin/sh + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +topsrcdir="`dirname $0`" +cd "$topsrcdir" + +make -k clean >/dev/null 2>&1 +make -k distclean >/dev/null 2>&1 +find . 
-name Makefile.in -exec rm -f {} \; +rm -rf \ +AUTHORS \ +ChangeLog \ +INSTALL \ +Makefile \ +Makefile.in \ +Makefile.orig \ +aclocal/libtool.m4 \ +aclocal/ltoptions.m4 \ +aclocal/ltsugar.m4 \ +aclocal/ltversion.m4 \ +aclocal/lt~obsolete.m4 \ +aclocal.m4 \ +autom4te.cache \ +autoscan.log \ +config.guess \ +config.h \ +config.hin \ +config.hin~ \ +config.log \ +config.status \ +config.status.lineno \ +config.sub \ +configure \ +configure.lineno \ +configure.scan \ +depcomp \ +.deps \ +install-sh \ +.libs \ +libtool \ +ltmain.sh \ +missing \ +ylwrap \ +if/gen-* \ +test/gen-* \ +lib/php/src/ext/thrift_protocol/.deps \ +lib/php/src/ext/thrift_protocol/Makefile \ +lib/php/src/ext/thrift_protocol/Makefile.fragments \ +lib/php/src/ext/thrift_protocol/Makefile.global \ +lib/php/src/ext/thrift_protocol/Makefile.objects \ +lib/php/src/ext/thrift_protocol/acinclude.m4 \ +lib/php/src/ext/thrift_protocol/aclocal.m4 \ +lib/php/src/ext/thrift_protocol/autom4te.cache \ +lib/php/src/ext/thrift_protocol/build \ +lib/php/src/ext/thrift_protocol/config.guess \ +lib/php/src/ext/thrift_protocol/config.h \ +lib/php/src/ext/thrift_protocol/config.h.in \ +lib/php/src/ext/thrift_protocol/config.log \ +lib/php/src/ext/thrift_protocol/config.nice \ +lib/php/src/ext/thrift_protocol/config.status \ +lib/php/src/ext/thrift_protocol/config.sub \ +lib/php/src/ext/thrift_protocol/configure \ +lib/php/src/ext/thrift_protocol/configure.in \ +lib/php/src/ext/thrift_protocol/include \ +lib/php/src/ext/thrift_protocol/install-sh \ +lib/php/src/ext/thrift_protocol/libtool \ +lib/php/src/ext/thrift_protocol/ltmain.sh \ +lib/php/src/ext/thrift_protocol/missing \ +lib/php/src/ext/thrift_protocol/mkinstalldirs \ +lib/php/src/ext/thrift_protocol/modules \ +lib/php/src/ext/thrift_protocol/run-tests.php diff --git a/vendor/github.com/apache/thrift/composer.json b/vendor/github.com/apache/thrift/composer.json new file mode 100644 index 0000000000..d937bc7ac2 --- /dev/null +++ b/vendor/github.com/apache/thrift/composer.json @@ -0,0 +1,30 @@ +{ + "name": "apache/thrift", + "description": "Apache Thrift RPC system", + "homepage": "http://thrift.apache.org/", + "type": "library", + "license": "Apache-2.0", + "authors": [ + { + "name": "Apache Thrift Developers", + "email": "dev@thrift.apache.org", + "homepage": "http://thrift.apache.org" + } + ], + "support": { + "email": "dev@thrift.apache.org", + "issues": "https://issues.apache.org/jira/browse/THRIFT" + }, + "require": { + "php": ">=5.3.0" + }, + "autoload": { + "psr-0": {"Thrift": "lib/php/lib/"} + }, + "minimum-stability": "dev", + "extra": { + "branch-alias": { + "dev-master": "0.10.0" + } + } +} diff --git a/vendor/github.com/apache/thrift/configure.ac b/vendor/github.com/apache/thrift/configure.ac new file mode 100755 index 0000000000..0972abfc9d --- /dev/null +++ b/vendor/github.com/apache/thrift/configure.ac @@ -0,0 +1,959 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +AC_PREREQ(2.65) +AC_CONFIG_MACRO_DIR([./aclocal]) + +AC_INIT([thrift], [0.10.0]) + +AC_CONFIG_AUX_DIR([.]) + +AM_INIT_AUTOMAKE([1.13 subdir-objects tar-ustar]) +PKG_PROG_PKG_CONFIG + +AC_ARG_VAR([PY_PREFIX], [Prefix for installing Python modules. + (Normal --prefix is ignored for Python because + Python has different conventions.) + Default = "/usr"]) +AS_IF([test "x$PY_PREFIX" = x], [PY_PREFIX="/usr"]) + +AC_ARG_VAR([JAVA_PREFIX], [Prefix for installing the Java lib jar. + Default = "/usr/local/lib"]) +AS_IF([test "x$JAVA_PREFIX" != x], [JAVA_PREFIX="$JAVA_PREFIX/usr/local/lib"], + [test "x$PREFIX" != x], [JAVA_PREFIX="$PREFIX/usr/local/lib"], + [JAVA_PREFIX="/usr/local/lib"]) + +AC_ARG_VAR([RUBY_PREFIX], [Prefix for installing Ruby modules. + (Normal --prefix is ignored for Ruby because + Ruby has different conventions.) + Default = none, let ruby setup decide]) + +AC_ARG_VAR([PHP_PREFIX], [Prefix for installing PHP modules. + (Normal --prefix is ignored for PHP because + PHP has different conventions.) + Default = "/usr/lib/php"]) +AS_IF([test "x$PHP_PREFIX" = x], [PHP_PREFIX="/usr/lib/php"]) + +AC_ARG_VAR([PHP_CONFIG_PREFIX], + [Prefix for installing PHP extension module .ini file. + (Normal --prefix is ignored for PHP because PHP has + different conventions.) + Default = "/etc/php.d"]) +AS_IF([test "x$PHP_CONFIG_PREFIX" = x], [PHP_CONFIG_PREFIX="/etc/php.d"]) + +AC_ARG_VAR([INSTALLDIRS], [When installing Perl modules, specifies which + of the sets of installation directories + to choose: perl, site or vendor. + Default = "vendor"]) +AS_IF([test "x$INSTALLDIRS" = x], [INSTALLDIRS="vendor"]) + +AC_ARG_VAR([PERL_PREFIX], [Prefix for installing Perl modules. + (Normal --prefix is ignored for Perl because + Perl has different conventions.) + Ignored, when INSTALLDIRS set to site or vendor. + Default = "/usr/local/lib"]) +AS_IF([test "x$PERL_PREFIX" = x], [PERL_PREFIX="/usr/local"]) + +AC_ARG_VAR([CABAL_CONFIGURE_FLAGS], + [Extra flags to pass to cabal: "cabal Setup.lhs configure $CABAL_CONFIGURE_FLAGS". + (Typically used to set --user or force --global.)]) + +AC_SUBST(CABAL_CONFIGURE_FLAGS) + +AC_ARG_VAR([D_IMPORT_PREFIX], [Prefix for installing D modules. + [INCLUDEDIR/d2]]) +AS_IF([test "x$D_IMPORT_PREFIX" = x], [D_IMPORT_PREFIX="${includedir}/d2"]) + +AC_ARG_VAR([DMD_LIBEVENT_FLAGS], [DMD flags for linking libevent (auto-detected if not set).]) +AC_ARG_VAR([DMD_OPENSSL_FLAGS], [DMD flags for linking OpenSSL (auto-detected if not set).]) + +AC_PROG_CC +AC_PROG_CPP +AC_PROG_CXX +AC_PROG_INSTALL +AC_PROG_LIBTOOL +AC_PROG_MAKE_SET +AC_PROG_BISON(2.5) +AC_PROG_YACC +AC_PROG_LEX +AM_PROG_LEX +AC_PROG_LN_S +AC_PROG_MKDIR_P +AC_PROG_AWK +AC_PROG_RANLIB + +AC_LANG([C++]) +AX_CXX_COMPILE_STDCXX_11([noext], [optional]) + +AM_EXTRA_RECURSIVE_TARGETS([style]) +AC_SUBST(CPPSTYLE_CMD, 'find . 
-type f \( -iname "*.h" -or -iname "*.cpp" -or -iname "*.cc" -or -iname "*.tcc" \) -printf "Reformatting: %h/%f\n" -exec clang-format -i {} \;') + +AC_ARG_ENABLE([libs], + AS_HELP_STRING([--enable-libs], [build the Apache Thrift libraries [default=yes]]), + [], enable_libs=yes +) +have_libs=yes +if test "$enable_libs" = "no"; then + have_libs="no" + with_cpp="no" + with_c_glib="no" + with_java="no" + with_csharp="no" + with_python="no" + with_ruby="no" + with_haskell="no" + with_haxe="no" + with_perl="no" + with_php="no" + with_php_extension="no" + with_dart="no" + with_erlang="no" + with_go="no" + with_d="no" + with_nodejs="no" + with_lua="no" +fi + + +AX_THRIFT_LIB(cpp, [C++], yes) +have_cpp=no +if test "$with_cpp" = "yes"; then + AX_BOOST_BASE([1.53.0]) + if test "x$succeeded" = "xyes" ; then + AC_SUBST([BOOST_LIB_DIR], [$(echo "$BOOST_LDFLAGS" | sed -e 's/^\-L//')]) + AC_SUBST([BOOST_CHRONO_LDADD], [$(echo "$BOOST_LIB_DIR/libboost_chrono.a")]) + AC_SUBST([BOOST_FILESYSTEM_LDADD], [$(echo "$BOOST_LIB_DIR/libboost_filesystem.a")]) + AC_SUBST([BOOST_SYSTEM_LDADD], [$(echo "$BOOST_LIB_DIR/libboost_system.a")]) + AC_SUBST([BOOST_TEST_LDADD], [$(echo "$BOOST_LIB_DIR/libboost_unit_test_framework.a")]) + AC_SUBST([BOOST_THREAD_LDADD], [$(echo "$BOOST_LIB_DIR/libboost_thread.a")]) + have_cpp="yes" + fi + + AX_CHECK_OPENSSL() + + AX_LIB_EVENT([1.0]) + have_libevent=$success + + AX_LIB_ZLIB([1.2.3]) + have_zlib=$success + + AX_THRIFT_LIB(qt4, [Qt], yes) + have_qt=no + if test "$with_qt4" = "yes"; then + PKG_CHECK_MODULES([QT], [QtCore >= 4.3, QtNetwork >= 4.3], have_qt=yes, have_qt=no) + fi + if test "$have_qt" = "yes"; then + AC_PATH_PROGS([QT_MOC], [moc-qt4 moc], "fail") + if test "$QT_MOC" = "fail"; then + have_qt=no + fi + fi + + AX_THRIFT_LIB(qt5, [Qt5], yes) + have_qt5=no + qt_reduce_reloc="" + if test "$with_qt5" = "yes"; then + PKG_CHECK_MODULES([QT5], [Qt5Core >= 5.0, Qt5Network >= 5.0], + [have_qt5=yes;qt_reduce_reloc=`$PKG_CONFIG --variable=qt_config Qt5Core | grep "reduce_relocations"`], + [have_qt5=no]) + fi + if test "$have_qt5" = "yes"; then + AC_PATH_PROGS([QT5_MOC], [moc-qt5 moc], "fail") + if test "$QT5_MOC" = "fail"; then + have_qt5=no + fi + fi +fi +AM_CONDITIONAL([WITH_CPP], [test "$have_cpp" = "yes"]) +AM_CONDITIONAL([AMX_HAVE_LIBEVENT], [test "$have_libevent" = "yes"]) +AM_CONDITIONAL([AMX_HAVE_ZLIB], [test "$have_zlib" = "yes"]) +AM_CONDITIONAL([AMX_HAVE_QT], [test "$have_qt" = "yes"]) +AM_CONDITIONAL([AMX_HAVE_QT5], [test "$have_qt5" = "yes"]) +AM_CONDITIONAL([QT5_REDUCE_RELOCATIONS], [test "x$qt_reduce_reloc" != "x"]) + +AX_THRIFT_LIB(c_glib, [C (GLib)], yes) +if test "$with_c_glib" = "yes"; then + PKG_CHECK_MODULES([GLIB], [glib-2.0 >= 2.0], have_glib2=yes, have_glib2=no) + PKG_CHECK_MODULES([GOBJECT], [gobject-2.0 >= 2.0], have_gobject2=yes, have_gobject2=no) + if test "$have_glib2" = "yes" -a "$have_gobject2" = "yes" ; then + have_c_glib="yes" + fi +fi +AM_CONDITIONAL(WITH_C_GLIB, [test "$have_glib2" = "yes" -a "$have_gobject2" = "yes"]) + +AX_THRIFT_LIB(csharp, [C#], yes) +if test "$with_csharp" = "yes"; then + PKG_CHECK_MODULES(MONO, mono >= 2.11.0, mono_2_11=yes, mono_2_11=no) + if test "$mono_2_11" == "yes"; then + AC_PATH_PROG([MCS], [mcs]) + if test "x$MCS" != "x"; then + mono_mcs="yes" + fi + fi + PKG_CHECK_MODULES(MONO, mono >= 2.0.0, net_3_5=yes, net_3_5=no) + PKG_CHECK_MODULES(MONO, mono >= 1.2.4, have_mono=yes, have_mono=no) + if test "$have_mono" = "yes" ; then + have_csharp="yes" + fi +fi +AM_CONDITIONAL(WITH_MONO, [test "$have_csharp" = "yes"]) 
+AM_CONDITIONAL(NET_2_0, [test "$net_3_5" = "no"]) +AM_CONDITIONAL(MONO_MCS, [test "$mono_mcs" = "yes"]) + +AX_THRIFT_LIB(java, [Java], yes) +if test "$with_java" = "yes"; then + AX_JAVAC_AND_JAVA + AC_PATH_PROG([ANT], [ant]) + AX_CHECK_ANT_VERSION($ANT, 1.7) + AC_SUBST(CLASSPATH) + AC_SUBST(ANT_FLAGS) + if test "x$JAVA" != "x" && test "x$JAVAC" != "x" && test "x$ANT" != "x" ; then + have_java="yes" + fi +fi +AM_CONDITIONAL([WITH_JAVA], [test "$have_java" = "yes"]) + +AX_THRIFT_LIB(erlang, [Erlang], yes) +if test "$with_erlang" = "yes"; then + AC_ERLANG_PATH_ERL + AC_ERLANG_PATH_ERLC + AC_PATH_PROG([REBAR], [rebar]) + if test -n "$ERLC" ; then + AC_ERLANG_SUBST_LIB_DIR + # Install into the detected Erlang directory instead of $libdir/erlang/lib + ERLANG_INSTALL_LIB_DIR="$ERLANG_LIB_DIR" + AC_ERLANG_SUBST_INSTALL_LIB_SUBDIR(AC_PACKAGE_NAME, AC_PACKAGE_VERSION) + fi + if test -n "$ERL" -a -n "$ERLC" && test "x$REBAR" != "x" ; then + have_erlang="yes" + + # otp_release is simply a number (like "17") for OTP17+ while "R16..." for OTP16 or less. + # OTP version is currently only used for running tests. + if $ERL -eval 'erlang:display(erlang:system_info(otp_release)),halt().' -noshell | grep "^\"R" >/dev/null; then + erlang_otp16_or_less="yes" + fi + fi +fi +AM_CONDITIONAL(WITH_ERLANG, [test "$have_erlang" = "yes"]) +AM_CONDITIONAL(ERLANG_OTP16, [test "$erlang_otp16_or_less" = "yes"]) + +AX_THRIFT_LIB(nodejs, [Nodejs], yes) +have_nodejs=no +if test "$with_nodejs" = "yes"; then + AC_PATH_PROGS([NODEJS], [nodejs node]) + AC_PATH_PROG([NPM], [npm]) + if test "x$NODEJS" != "x" -a "x$NPM" != "x"; then + have_nodejs="yes" + fi +fi +AM_CONDITIONAL(WITH_NODEJS, [test "$have_nodejs" = "yes"]) +AM_CONDITIONAL(HAVE_NPM, [test "x$NPM" != "x"]) + +AX_THRIFT_LIB(lua, [Lua], yes) +have_lua=no +if test "$with_lua" = "yes"; then + AX_PROG_LUA(5.2,, have_lua="yes", have_lua="no") + if test "$have_lua" = "yes"; then + AX_LUA_HEADERS(, have_lua="no") + AX_LUA_LIBS(, have_lua="no") + fi +fi +AM_CONDITIONAL(WITH_LUA, [test "$have_lua" = "yes"]) + +# Find python regardless of with_python value, because it's needed by make cross +AM_PATH_PYTHON(2.6,, :) +AX_THRIFT_LIB(python, [Python], yes) +if test "$with_python" = "yes"; then + if test -n "$PYTHON"; then + have_python="yes" + fi + AC_PATH_PROG([TRIAL], [trial]) + if test -n "$TRIAL"; then + have_trial="yes" + fi +fi +AM_CONDITIONAL(WITH_PYTHON, [test "$have_python" = "yes"]) +AM_CONDITIONAL(WITH_TWISTED_TEST, [test "$have_trial" = "yes"]) + +# Find "python3" executable. +# It's distro specific and far from ideal but needed to cross test py2-3 at once. 
+# TODO: find "python2" if it's 3.x +if python --version 2>&1 | grep -q "Python 2"; then + AC_PATH_PROGS([PYTHON3], [python3 python3.5 python35 python3.4 python34]) + if test -n "$PYTHON3"; then + have_py3="yes" + fi +fi +AM_CONDITIONAL(WITH_PY3, [test "$have_py3" = "yes"]) + +AX_THRIFT_LIB(perl, [Perl], yes) +if test "$with_perl" = "yes"; then + AC_PATH_PROG([PERL], [perl]) + if test -n "$PERL" ; then + AC_PROG_PERL_MODULES([Bit::Vector], success="yes", success="no") + have_perl_bit_vector="$success" + AC_PROG_PERL_MODULES([Class::Accessor], success="yes", success="no") + have_perl_class_accessor="$success" + fi + if test -n "$PERL" -a "$have_perl_bit_vector" = "yes" ; then + if test -n "$PERL" -a "$have_perl_class_accessor" = "yes" ; then + have_perl="yes" + fi + fi +fi +AM_CONDITIONAL(WITH_PERL, [test "$have_perl" = "yes"]) + +AX_THRIFT_LIB(php, [PHP], yes) +if test "$with_php" = "yes"; then + AC_PATH_PROG([PHP], [php]) + if test -n "$PHP" ; then + have_php="yes" + fi +fi +AM_CONDITIONAL(WITH_PHP, [test "$have_php" = "yes"]) + +AX_THRIFT_LIB(php_extension, [PHP_EXTENSION], yes) +if test "$with_php_extension" = "yes"; then + if test -f "lib/php/src/ext/thrift_protocol/configure"; then + AC_PATH_PROG([PHP_CONFIG], [php-config]) + if test -n "$PHP_CONFIG" ; then + AC_CONFIG_SUBDIRS([lib/php/src/ext/thrift_protocol]) + have_php_extension="yes" + fi + fi +fi +AM_CONDITIONAL(WITH_PHP_EXTENSION, [test "$have_php_extension" = "yes"]) + +AC_PATH_PROG([PHPUNIT], [phpunit]) +AM_CONDITIONAL(HAVE_PHPUNIT, [test "x$PHPUNIT" != "x"]) + +AX_THRIFT_LIB(dart, [DART], yes) +if test "$with_dart" = "yes"; then + AC_PATH_PROG([DART], [dart]) + AC_PATH_PROG([DARTPUB], [pub]) + if test "x$DART" != "x" -a "x$DARTPUB" != "x"; then + have_dart="yes" + fi +fi +AM_CONDITIONAL(WITH_DART, [test "$have_dart" = "yes"]) + +AX_THRIFT_LIB(ruby, [Ruby], yes) +have_ruby=no +if test "$with_ruby" = "yes"; then + AC_PATH_PROG([RUBY], [ruby]) + AC_PATH_PROG([BUNDLER], [bundle]) + if test "x$RUBY" != "x" -a "x$BUNDLER" != "x"; then + have_ruby="yes" + fi +fi +AM_CONDITIONAL(WITH_RUBY, [test "$have_ruby" = "yes"]) +AM_CONDITIONAL(HAVE_BUNDLER, [test "x$BUNDLER" != "x"]) + +AX_THRIFT_LIB(haskell, [Haskell], yes) +have_haskell=no +RUNHASKELL=true +CABAL=true +if test "$with_haskell" = "yes"; then + AC_PATH_PROG([CABAL], [cabal]) + AC_PATH_PROG([RUNHASKELL], [runhaskell]) + if test "x$CABAL" != "x" -a "x$RUNHASKELL" != "x"; then + have_haskell="yes" + else + RUNHASKELL=true + CABAL=true + fi +fi +AC_SUBST(CABAL) +AC_SUBST(RUNHASKELL) +AM_CONDITIONAL(WITH_HASKELL, [test "$have_haskell" = "yes"]) + +AX_THRIFT_LIB(go, [Go], yes) +if test "$with_go" = "yes"; then + AC_PATH_PROG([GO], [go]) + if [[ -x "$GO" ]] ; then + AS_IF([test -n "$GO"],[ + ax_go_version="1.4" + + AC_MSG_CHECKING([for Go version]) + golang_version=`$GO version 2>&1 | $SED -e 's/\(go \)\(version \)\(go\)\(@<:@0-9@:>@.@<:@0-9@:>@.@<:@0-9@:>@\)\(@<:@\*@:>@*\).*/\4/'` + AC_MSG_RESULT($golang_version) + AC_SUBST([golang_version],[$golang_version]) + AX_COMPARE_VERSION([$ax_go_version],[le],[$golang_version],[ + : + have_go="yes" + ],[ + : + have_go="no" + ]) + ],[ + AC_MSG_WARN([could not find Go ]) + have_go="no" + ]) + fi +fi +AM_CONDITIONAL(WITH_GO, [test "$have_go" = "yes"]) + + +AX_THRIFT_LIB(haxe, [Haxe], yes) +if test "$with_haxe" = "yes"; then + AC_PATH_PROG([HAXE], [haxe]) + if [[ -x "$HAXE" ]] ; then + AX_PROG_HAXE_VERSION( [3.1.3], have_haxe="yes", have_haxe="no") + fi +fi +AM_CONDITIONAL(WITH_HAXE, [test "$have_haxe" = "yes"]) + + +AX_THRIFT_LIB(d, [D], yes) 
+if test "$with_d" = "yes"; then + AX_DMD + AC_SUBST(DMD) + if test "x$DMD" != "x"; then + have_d="yes" + fi +fi + +# Determine actual name of the generated D library for use in the command line +# when compiling tests. This is needed because the -l syntax doesn't work +# with OPTLINK (Windows). +lib_prefix=lib +lib_suffix=a +case "$host_os" in + cygwin* | mingw* | pw32* | cegcc*) + lib_prefix="" + lib_suffix=lib + ;; +esac +D_LIB_NAME="${lib_prefix}thriftd.${lib_suffix}" +AC_SUBST(D_LIB_NAME) +D_EVENT_LIB_NAME="${lib_prefix}thriftd-event.${lib_suffix}" +AC_SUBST(D_EVENT_LIB_NAME) +D_SSL_LIB_NAME="${lib_prefix}thriftd-ssl.${lib_suffix}" +AC_SUBST(D_SSL_LIB_NAME) + +if test "$have_d" = "yes"; then + AX_CHECK_D_MODULE(deimos.event2.event) + have_deimos_event2=$success + + with_d_event_tests="no" + if test "$have_deimos_event2" = "yes"; then + if test "x$DMD_LIBEVENT_FLAGS" = "x"; then + if test "$dmd_optlink" = "yes"; then + AC_MSG_WARN([D libevent interface found, but cannot auto-detect \ +linker flags for OPTLINK. Please set DMD_LIBEVENT_FLAGS manually.]) + else + AX_LIB_EVENT([2.0]) + if test "$success" = "yes"; then + DMD_LIBEVENT_FLAGS=$(echo "$LIBEVENT_LDFLAGS $LIBEVENT_LIBS" | \ + sed -e 's/^ *//g;s/ *$//g;s/^\(.\)/-L\1/g;s/ */ -L/g') + with_d_event_tests="yes" + else + AC_MSG_WARN([D libevent interface present, but libevent library not found.]) + fi + fi + else + with_d_event_tests="yes" + fi + fi + + AX_CHECK_D_MODULE(deimos.openssl.ssl) + have_deimos_openssl=$success + + with_d_ssl_tests="no" + if test "$have_deimos_openssl" = "yes"; then + if test "x$DMD_OPENSSL_FLAGS" = "x"; then + if test "$dmd_optlink" = "yes"; then + AC_MSG_WARN([D OpenSSL interface found, but cannot auto-detect \ +linker flags for OPTLINK. Please set DMD_OPENSSL_FLAGS manually.]) + else + AX_CHECK_OPENSSL([with_d_ssl_tests="yes"]) + if test "$with_d_ssl_tests" = "yes"; then + DMD_OPENSSL_FLAGS=$(echo "$OPENSSL_LDFLAGS $OPENSSL_LIBS" | \ + sed -e 's/^ *//g;s/ *$//g;s/^\(.\)/-L\1/g;s/ */ -L/g') + else + AC_MSG_WARN([D OpenSSL interface present, but OpenSSL library not found.]) + fi + fi + else + with_d_ssl_tests="yes" + fi + fi +fi + +AM_CONDITIONAL(WITH_D, [test "$have_d" = "yes"]) +AM_CONDITIONAL(DMD_OPTLINK, [test "$dmd_optlink" = "yes"]) +AC_SUBST(DMD_OF_DIRSEP, "$dmd_of_dirsep") +AM_CONDITIONAL(HAVE_DEIMOS_EVENT2, [test "$have_deimos_event2" = "yes"]) +AM_CONDITIONAL(WITH_D_EVENT_TESTS, [test "$with_d_event_tests" = "yes"]) +AC_SUBST(DMD_LIBEVENT_FLAGS) +AM_CONDITIONAL(HAVE_DEIMOS_OPENSSL, [test "$have_deimos_openssl" = "yes"]) +AM_CONDITIONAL(WITH_D_SSL_TESTS, [test "$with_d_ssl_tests" = "yes"]) +AC_SUBST(DMD_OPENSSL_FLAGS) + +AC_ARG_ENABLE([tests], + AS_HELP_STRING([--enable-tests], [build tests [default=yes]]), + [], enable_tests=yes +) +have_tests=yes +if test "$enable_tests" = "no"; then + have_tests="no" +fi +AM_CONDITIONAL(WITH_TESTS, [test "$have_tests" = "yes"]) + +AC_ARG_ENABLE([plugin], + AS_HELP_STRING([--enable-plugin], [build compiler plugin support [default=yes]]), + [], enable_plugin=yes +) +have_plugin=yes +if test "$have_cpp" = "no" ; then + have_plugin="no" +fi +if test "$enable_plugin" = "no"; then + have_plugin="no" +fi +if test "$have_plugin" = "yes" ; then + AC_CONFIG_LINKS([compiler/cpp/test/plugin/t_cpp_generator.cc:compiler/cpp/src/thrift/generate/t_cpp_generator.cc]) +fi +AM_CONDITIONAL(WITH_PLUGIN, [test "$have_plugin" = "yes"]) + +AC_ARG_ENABLE([tutorial], + AS_HELP_STRING([--enable-tutorial], [build tutorial [default=yes]]), + [], enable_tutorial=yes +) +have_tutorial=yes +if 
test "$enable_tutorial" = "no"; then + have_tutorial="no" +fi +AM_CONDITIONAL(WITH_TUTORIAL, [test "$have_tutorial" = "yes"]) + +AM_CONDITIONAL(MINGW, false) +case "${host_os}" in +*mingw*) + mingw32_support="yes" + AC_CHECK_HEADER(windows.h) + AM_CONDITIONAL(MINGW, true) + ;; +*) + AC_ISC_POSIX + ;; +esac + +AC_C_CONST +AC_C_INLINE +AC_C_VOLATILE + +AC_HEADER_STDBOOL +AC_HEADER_STDC +AC_HEADER_TIME +AC_HEADER_SYS_WAIT +AC_TYPE_SIGNAL +AC_CHECK_HEADERS([arpa/inet.h]) +AC_CHECK_HEADERS([sys/param.h]) +AC_CHECK_HEADERS([fcntl.h]) +AC_CHECK_HEADERS([inttypes.h]) +AC_CHECK_HEADERS([limits.h]) +AC_CHECK_HEADERS([netdb.h]) +AC_CHECK_HEADERS([netinet/in.h]) +AC_CHECK_HEADERS([pthread.h]) +AC_CHECK_HEADERS([stddef.h]) +AC_CHECK_HEADERS([stdlib.h]) +AC_CHECK_HEADERS([sys/socket.h]) +AC_CHECK_HEADERS([sys/time.h]) +AC_CHECK_HEADERS([sys/un.h]) +AC_CHECK_HEADERS([sys/poll.h]) +AC_CHECK_HEADERS([sys/resource.h]) +AC_CHECK_HEADERS([unistd.h]) +AC_CHECK_HEADERS([libintl.h]) +AC_CHECK_HEADERS([malloc.h]) +AC_CHECK_HEADERS([openssl/ssl.h]) +AC_CHECK_HEADERS([openssl/rand.h]) +AC_CHECK_HEADERS([openssl/x509v3.h]) +AC_CHECK_HEADERS([sched.h]) +AC_CHECK_HEADERS([wchar.h]) + +AC_CHECK_LIB(pthread, pthread_create) +dnl NOTE(dreiss): I haven't been able to find any really solid docs +dnl on what librt is and how it fits into various Unix systems. +dnl My best guess is that it is where glibc stashes its implementation +dnl of the POSIX Real-Time Extensions. This seems necessary on Linux, +dnl and we haven't yet found a system where this is a problem. +AC_CHECK_LIB(rt, clock_gettime) +AC_CHECK_LIB(socket, setsockopt) + +AC_TYPE_INT16_T +AC_TYPE_INT32_T +AC_TYPE_INT64_T +AC_TYPE_INT8_T +AC_TYPE_MODE_T +AC_TYPE_OFF_T +AC_TYPE_SIZE_T +AC_TYPE_SSIZE_T +AC_TYPE_UINT16_T +AC_TYPE_UINT32_T +AC_TYPE_UINT64_T +AC_TYPE_UINT8_T +AC_CHECK_TYPES([ptrdiff_t], [], [exit 1]) + +AC_STRUCT_TM + +dnl NOTE(dreiss): AI_ADDRCONFIG is not defined on OpenBSD. +AC_CHECK_DECL([AI_ADDRCONFIG], [], + [AC_DEFINE([AI_ADDRCONFIG], 0, + [Define if the AI_ADDRCONFIG symbol is unavailable])], + [ + #include + #include + #include +]) + +AC_FUNC_ALLOCA +AC_FUNC_FORK +AC_FUNC_MALLOC +AC_FUNC_MEMCMP +AC_FUNC_REALLOC +AC_FUNC_SELECT_ARGTYPES +AC_FUNC_STAT +AC_FUNC_STRERROR_R +AC_FUNC_STRFTIME +AC_FUNC_VPRINTF +AC_CHECK_FUNCS([strtoul]) +AC_CHECK_FUNCS([bzero]) +AC_CHECK_FUNCS([ftruncate]) +AC_CHECK_FUNCS([gethostbyname]) +AC_CHECK_FUNCS([gethostbyname_r]) +AC_CHECK_FUNCS([gettimeofday]) +AC_CHECK_FUNCS([memmove]) +AC_CHECK_FUNCS([memset]) +AC_CHECK_FUNCS([mkdir]) +AC_CHECK_FUNCS([realpath]) +AC_CHECK_FUNCS([select]) +AC_CHECK_FUNCS([setlocale]) +AC_CHECK_FUNCS([socket]) +AC_CHECK_FUNCS([strchr]) +AC_CHECK_FUNCS([strdup]) +AC_CHECK_FUNCS([strerror]) +AC_CHECK_FUNCS([strstr]) +AC_CHECK_FUNCS([strtol]) +AC_CHECK_FUNCS([sqrt]) +dnl The following functions are optional. +AC_CHECK_FUNCS([alarm]) +AC_CHECK_FUNCS([clock_gettime]) +AC_CHECK_FUNCS([sched_get_priority_min]) +AC_CHECK_FUNCS([sched_get_priority_max]) +AC_CHECK_FUNCS([inet_ntoa]) +AC_CHECK_FUNCS([pow]) + +if test "$cross_compiling" = "no" ; then + AX_SIGNED_RIGHT_SHIFT +fi + +dnl autoscan thinks we need this macro because we have a member function +dnl called "error". Invoke the macro but don't run the check so autoscan +dnl thinks we are in the clear. It's highly unlikely that we will ever +dnl actually use the function that this checks for. 
+if false ; then + AC_FUNC_ERROR_AT_LINE +fi + +# --- Coverage hooks --- + +AC_ARG_ENABLE(coverage, + [ --enable-coverage turn on -fprofile-arcs -ftest-coverage], + [case "${enableval}" in + yes) ENABLE_COVERAGE=1 ;; + no) ENABLE_COVERAGE=0 ;; + *) AC_MSG_ERROR(bad value ${enableval} for --enable-coverage) ;; + esac], + [ENABLE_COVERAGE=2]) + +if test "x[$]ENABLE_COVERAGE" = "x1"; then + AC_MSG_WARN(enable coverage) + GCOV_CFLAGS="`echo \"[$]CFLAGS\" | perl -pe 's/-O\d+//g;'` -fprofile-arcs -ftest-coverage" + GCOV_CXXFLAGS="`echo \"[$]CXXFLAGS\" | perl -pe 's/-O\d+//g;'` -fprofile-arcs -ftest-coverage" + GCOV_LDFLAGS="-XCClinker -fprofile-arcs -XCClinker -ftest-coverage" +fi + +AC_SUBST(ENABLE_COVERAGE) +AC_SUBST(GCOV_CFLAGS) +AC_SUBST(GCOV_CXXFLAGS) +AC_SUBST(GCOV_LDFLAGS) + +AC_ARG_ENABLE(boostthreads, + [ --enable-boostthreads use boost threads, instead of POSIX pthread (experimental) ], + [case "${enableval}" in + yes) ENABLE_BOOSTTHREADS=1 ;; + no) ENABLE_BOOSTTHREADS=0 ;; + *) AC_MSG_ERROR(bad value ${enableval} for --enable-boostthreads) ;; + esac], + [ENABLE_BOOSTTHREADS=2]) + + +if test "x[$]ENABLE_BOOSTTHREADS" = "x1"; then + AC_MSG_WARN(enable boostthreads) + AC_DEFINE([USE_BOOST_THREAD], [1], [experimental --enable-boostthreads that replaces POSIX pthread by boost::thread]) + LIBS="-lboost_thread $LIBS" +fi + +AM_CONDITIONAL([WITH_BOOSTTHREADS], [test "x[$]ENABLE_BOOSTTHREADS" = "x1"]) + +AC_CONFIG_HEADERS(config.h:config.hin) +AC_CONFIG_HEADERS(lib/cpp/src/thrift/config.h:config.hin) +AC_CONFIG_HEADERS(lib/c_glib/src/thrift/config.h:config.hin) +# guard against a predefined config.h +AH_TOP([ +#ifndef CONFIG_H +#define CONFIG_H +]) +AH_BOTTOM([ +#endif +]) + + +AC_CONFIG_FILES([ + Makefile + compiler/cpp/Makefile + compiler/cpp/src/Makefile + compiler/cpp/src/thrift/plugin/Makefile + compiler/cpp/test/Makefile + compiler/cpp/src/thrift/version.h + lib/Makefile + lib/cpp/Makefile + lib/cpp/test/Makefile + lib/cpp/thrift-nb.pc + lib/cpp/thrift-z.pc + lib/cpp/thrift-qt.pc + lib/cpp/thrift-qt5.pc + lib/cpp/thrift.pc + lib/c_glib/Makefile + lib/c_glib/thrift_c_glib.pc + lib/c_glib/test/Makefile + lib/csharp/Makefile + lib/csharp/test/Multiplex/Makefile + lib/d/Makefile + lib/d/test/Makefile + lib/erl/Makefile + lib/go/Makefile + lib/go/test/Makefile + lib/haxe/test/Makefile + lib/hs/Makefile + lib/java/Makefile + lib/js/Makefile + lib/js/test/Makefile + lib/json/Makefile + lib/json/test/Makefile + lib/nodejs/Makefile + lib/perl/Makefile + lib/perl/test/Makefile + lib/php/Makefile + lib/php/test/Makefile + lib/dart/Makefile + lib/py/Makefile + lib/rb/Makefile + lib/lua/Makefile + lib/xml/Makefile + lib/xml/test/Makefile + test/Makefile + test/features/Makefile + test/c_glib/Makefile + test/cpp/Makefile + test/csharp/Makefile + test/erl/Makefile + test/go/Makefile + test/haxe/Makefile + test/hs/Makefile + test/lua/Makefile + test/php/Makefile + test/dart/Makefile + test/perl/Makefile + test/py/Makefile + test/py.twisted/Makefile + test/py.tornado/Makefile + test/rb/Makefile + tutorial/Makefile + tutorial/c_glib/Makefile + tutorial/cpp/Makefile + tutorial/d/Makefile + tutorial/go/Makefile + tutorial/haxe/Makefile + tutorial/hs/Makefile + tutorial/java/Makefile + tutorial/js/Makefile + tutorial/nodejs/Makefile + tutorial/dart/Makefile + tutorial/py/Makefile + tutorial/py.twisted/Makefile + tutorial/py.tornado/Makefile + tutorial/rb/Makefile +]) + +if test "$have_cpp" = "yes" ; then MAYBE_CPP="cpp" ; else MAYBE_CPP="" ; fi +AC_SUBST([MAYBE_CPP]) +if test "$have_c_glib" = "yes" ; then
MAYBE_C_GLIB="c_glib" ; else MAYBE_C_GLIB="" ; fi +AC_SUBST([MAYBE_C_GLIB]) +if test "$have_d" = "yes" -a "$have_deimos_event2" = "yes" -a "$have_deimos_openssl" = "yes"; then MAYBE_D="d" ; else MAYBE_D="" ; fi +AC_SUBST([MAYBE_D]) +if test "$have_java" = "yes" ; then MAYBE_JAVA="java" ; else MAYBE_JAVA="" ; fi +AC_SUBST([MAYBE_JAVA]) +if test "$have_csharp" = "yes" ; then MAYBE_CSHARP="csharp" ; else MAYBE_CSHARP="" ; fi +AC_SUBST([MAYBE_CSHARP]) +if test "$have_python" = "yes" ; then MAYBE_PYTHON="py" ; else MAYBE_PYTHON="" ; fi +AC_SUBST([MAYBE_PYTHON]) +if test "$have_py3" = "yes" ; then MAYBE_PY3="py3" ; else MAYBE_PY3="" ; fi +AC_SUBST([MAYBE_PY3]) +if test "$have_ruby" = "yes" ; then MAYBE_RUBY="rb" ; else MAYBE_RUBY="" ; fi +AC_SUBST([MAYBE_RUBY]) +if test "$have_haskell" = "yes" ; then MAYBE_HASKELL="hs" ; else MAYBE_HASKELL="" ; fi +AC_SUBST([MAYBE_HASKELL]) +if test "$have_perl" = "yes" ; then MAYBE_PERL="perl" ; else MAYBE_PERL="" ; fi +AC_SUBST([MAYBE_PERL]) +if test "$have_php" = "yes" ; then MAYBE_PHP="php" ; else MAYBE_PHP="" ; fi +AC_SUBST([MAYBE_PHP]) +if test "$have_dart" = "yes" ; then MAYBE_DART="dart" ; else MAYBE_DART="" ; fi +AC_SUBST([MAYBE_DART]) +if test "$have_go" = "yes" ; then MAYBE_GO="go" ; else MAYBE_GO="" ; fi +AC_SUBST([MAYBE_GO]) +if test "$have_nodejs" = "yes" ; then MAYBE_NODEJS="nodejs" ; else MAYBE_NODEJS="" ; fi +AC_SUBST([MAYBE_NODEJS]) +if test "$have_erlang" = "yes" ; then MAYBE_ERLANG="erl" ; else MAYBE_ERLANG="" ; fi +AC_SUBST([MAYBE_ERLANG]) +if test "$have_lua" = "yes" ; then MAYBE_LUA="lua" ; else MAYBE_LUA="" ; fi +AC_SUBST([MAYBE_LUA]) + +AC_OUTPUT + + +echo +echo "$PACKAGE $VERSION" +echo +echo "Building Plugin Support ...... : $have_plugin" +echo "Building C++ Library ......... : $have_cpp" +echo "Building C (GLib) Library .... : $have_c_glib" +echo "Building Java Library ........ : $have_java" +echo "Building C# Library .......... : $have_csharp" +echo "Building Python Library ...... : $have_python" +echo "Building Ruby Library ........ : $have_ruby" +echo "Building Haxe Library ........ : $have_haxe" +echo "Building Haskell Library ..... : $have_haskell" +echo "Building Perl Library ........ : $have_perl" +echo "Building PHP Library ......... : $have_php" +echo "Building Dart Library ........ : $have_dart" +echo "Building Erlang Library ...... : $have_erlang" +echo "Building Go Library .......... : $have_go" +echo "Building D Library ........... : $have_d" +echo "Building NodeJS Library ...... : $have_nodejs" +echo "Building Lua Library ......... : $have_lua" + +if test "$have_cpp" = "yes" ; then + echo + echo "C++ Library:" + echo " Build TZlibTransport ...... : $have_zlib" + echo " Build TNonblockingServer .. : $have_libevent" + echo " Build TQTcpServer (Qt4) .... : $have_qt" + echo " Build TQTcpServer (Qt5) .... : $have_qt5" +fi +if test "$have_java" = "yes" ; then + echo + echo "Java Library:" + echo " Using javac ............... : $JAVAC" + echo " Using java ................ : $JAVA" + echo " Using ant ................. : $ANT" +fi +if test "$have_csharp" = "yes" ; then + echo + echo "C# Library:" + echo " Using .NET 3.5 ............ : $net_3_5" +fi +if test "$have_python" = "yes" ; then + echo + echo "Python Library:" + echo " Using Python .............. : $PYTHON" + if test "$have_py3" = "yes" ; then + echo " Using Python3 ............. : $PYTHON3" + fi + if test "$have_trial" = "yes"; then + echo " Using trial ............... 
: $TRIAL" + fi +fi +if test "$have_php" = "yes" ; then + echo + echo "PHP Library:" + echo " Using php-config .......... : $PHP_CONFIG" +fi +if test "$have_dart" = "yes" ; then + echo + echo "Dart Library:" + echo " Using Dart ................ : $DART" + echo " Using Pub ................. : $DARTPUB" +fi +if test "$have_ruby" = "yes" ; then + echo + echo "Ruby Library:" + echo " Using Ruby ................ : $RUBY" +fi +if test "$have_haskell" = "yes" ; then + echo + echo "Haskell Library:" + echo " Using Haskell ............. : $RUNHASKELL" + echo " Using Cabal ............... : $CABAL" +fi +if test "$have_haxe" = "yes" ; then + echo + echo "Haxe Library:" + echo " Using Haxe ................ : $HAXE" + echo " Using Haxe version ........ : $HAXE_VERSION" +fi +if test "$have_perl" = "yes" ; then + echo + echo "Perl Library:" + echo " Using Perl ................ : $PERL" +fi +if test "$have_erlang" = "yes" ; then + echo + echo "Erlang Library:" + echo " Using erlc ................ : $ERLC" + echo " Using rebar ............... : $REBAR" +fi +if test "$have_go" = "yes" ; then + echo + echo "Go Library:" + echo " Using Go................... : $GO" + echo " Using Go version........... : $($GO version)" +fi +if test "$have_d" = "yes" ; then + echo + echo "D Library:" + echo " Using D Compiler .......... : $DMD" + echo " Building D libevent tests . : $with_d_event_tests" + echo " Building D SSL tests ...... : $with_d_ssl_tests" +fi +if test "$have_nodejs" = "yes" ; then + echo + echo "NodeJS Library:" + echo " Using NodeJS .............. : $NODEJS" + echo " Using NodeJS version....... : $($NODEJS --version)" +fi +if test "$have_lua" = "yes" ; then + echo + echo "Lua Library:" + echo " Using Lua .............. : $LUA" +fi +echo +echo "If something is missing that you think should be present," +echo "please skim the output of configure to find the missing" +echo "component. Details are present in config.log." diff --git a/vendor/github.com/apache/thrift/doap.rdf b/vendor/github.com/apache/thrift/doap.rdf new file mode 100755 index 0000000000..14a8d6638d --- /dev/null +++ b/vendor/github.com/apache/thrift/doap.rdf @@ -0,0 +1,132 @@ + + + + + + 2012-04-14 + + Apache Thrift + + + Apache Thrift software provides a framework for scalable cross-language services development, combines a software stack with a code generation engine to build services that work efficiently and seamlessly between C++, Java, Python, PHP, Ruby, Erlang, Perl, Haskell, C#, Cocoa, JavaScript, Node.js, Smalltalk, OCaml and Delphi and other languages. + Apache Thrift allows you to define data types and service interfaces in a simple definition file. Taking that file as input, the compiler generates code to be used to easily build RPC clients and servers that communicate seamlessly across programming languages. Instead of writing a load of boilerplate code to serialize and transport your objects and invoke remote methods, you can get right down to business. 
+ + + + ActionScript + C + C# + C++ + Cocoa + D + Delphi + Erlang + Go + Haskell + Java + JavaScript + node.js + OCaml + Perl + PHP + Python + SmallTalk + + + + + + + Apache Thrift + 2015-09-25 + 0.9.3 + + + Apache Thrift + 2014-11-05 + 0.9.2 + + + Apache Thrift + 2013-08-22 + 0.9.1 + + + Apache Thrift + 2012-10-15 + 0.9.0 + + + Apache Thrift + 2011-11-29 + 0.8.0 + + + Apache Thrift + 2011-08-13 + 0.7.0 + + + Apache Thrift + 2011-04-25 + 0.6.1 + + + Apache Thrift + 2011-02-08 + 0.6.0 + + + Apache Thrift (incubating) + 2010-10-07 + 0.5.0 + + + Apache Thrift (incubating) + 2010-08-23 + 0.4.0 + + + Apache Thrift (incubating) + 2010-08-04 + 0.3.0 + + + Apache Thrift (incubating) + 2009-12-11 + 0.2.0 + + + + + + + + + + + Apache Thrift PMC + + + + + diff --git a/vendor/github.com/apache/thrift/lib/Makefile.am b/vendor/github.com/apache/thrift/lib/Makefile.am new file mode 100644 index 0000000000..5f17fca882 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/Makefile.am @@ -0,0 +1,109 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +SUBDIRS = json xml +PRECROSS_TARGET = + +if WITH_CPP +# cpp dir is picked directly by plugin build +if !WITH_PLUGIN +SUBDIRS += cpp +endif +endif + +if WITH_C_GLIB +SUBDIRS += c_glib +endif + +if WITH_MONO +SUBDIRS += csharp +endif + +if WITH_JAVA +SUBDIRS += java +PRECROSS_TARGET += precross-java +# JavaScript unit test depends on java +# so test only if java, ant & co is available +SUBDIRS += js +endif + +if WITH_PYTHON +SUBDIRS += py +endif + +if WITH_ERLANG +SUBDIRS += erl +endif + +if WITH_RUBY +SUBDIRS += rb +endif + +if WITH_HASKELL +SUBDIRS += hs +endif + +if WITH_PERL +SUBDIRS += perl +endif + +if WITH_PHP +SUBDIRS += php +endif + +if WITH_DART +SUBDIRS += dart +endif + +if WITH_GO +SUBDIRS += go +endif + +if WITH_D +SUBDIRS += d +PRECROSS_TARGET += precross-d +endif + +if WITH_NODEJS +SUBDIRS += nodejs +PRECROSS_TARGET += precross-nodejs +endif + +if WITH_LUA +SUBDIRS += lua +endif + +# All of the libs that don't use Automake need to go in here +# so they will end up in our release tarballs. +EXTRA_DIST = \ + as3 \ + cocoa \ + d \ + dart \ + delphi \ + haxe \ + javame \ + js \ + ocaml \ + st \ + ts + +precross-%: + $(MAKE) -C $* precross +precross: $(PRECROSS_TARGET) diff --git a/vendor/github.com/apache/thrift/lib/go/Makefile.am b/vendor/github.com/apache/thrift/lib/go/Makefile.am new file mode 100644 index 0000000000..ff946ea8d0 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/Makefile.am @@ -0,0 +1,42 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +SUBDIRS = . + +if WITH_TESTS +SUBDIRS += test +endif + +install: + @echo '##############################################################' + @echo '##############################################################' + @echo 'The Go client library should be installed via "go get", please see /lib/go/README.md' + @echo '##############################################################' + @echo '##############################################################' + +check-local: + $(GO) test ./thrift + +all-local: + $(GO) build ./thrift + +EXTRA_DIST = \ + thrift \ + coding_standards.md \ + README.md diff --git a/vendor/github.com/apache/thrift/lib/go/README.md b/vendor/github.com/apache/thrift/lib/go/README.md new file mode 100644 index 0000000000..7440474c32 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/README.md @@ -0,0 +1,81 @@ +Thrift Go Software Library + +License +======= + +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. + + +Using Thrift with Go +==================== + +Following Go conventions, we recommend using the 'go' tool to install +Thrift for Go: + + $ go get git.apache.org/thrift.git/lib/go/thrift/... + +This will retrieve and install the most recent version of the package. + + +A note about optional fields +============================ + +The thrift-to-Go compiler tries to represent thrift IDL structs as Go structs. +We must be able to distinguish between optional fields that are set to their +default value and optional values which are actually unset, so the generated +code represents optional fields via pointers. + +This is generally intuitive and works well much of the time, but Go does not +have a syntax for creating a pointer to a constant in a single expression. That +is, given a struct like + + struct SomeIDLType { + OptionalField *int32 + } + +the following will not compile: + + x := &SomeIDLType{ + OptionalField: &(3), + } + +(Nor is there any other syntax built into the language.) + +As such, we provide some helpers that do just this under lib/go/thrift/. E.g., + + x := &SomeIDLType{ + OptionalField: thrift.Int32Ptr(3), + } + +And so on. The code generator also creates analogous helpers for user-defined +typedefs and enums.
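+
+For reference, each such helper is essentially a one-line function that
+copies its argument and returns the address of the copy. A minimal sketch
+of the pattern (illustrative, not the vendored source itself):
+
+    // Int32Ptr copies v and returns a pointer to the copy, working
+    // around Go's lack of an "&literal" syntax for basic types.
+    func Int32Ptr(v int32) *int32 { return &v }
+
+Because the argument is passed by value, the returned pointer refers to a
+fresh copy rather than to the caller's variable.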
+ +Adding custom tags to generated Thrift structs +============================================== + +You can add tags to the auto-generated thrift structs using the following format: + + struct foo { + 1: required string Bar (go.tag = "some_tag:\"some_tag_value\"") + } + +which will generate: + + type Foo struct { + Bar string `thrift:"bar,1,required" some_tag:"some_tag_value"` + } diff --git a/vendor/github.com/apache/thrift/lib/go/coding_standards.md b/vendor/github.com/apache/thrift/lib/go/coding_standards.md new file mode 100644 index 0000000000..fa0390bb57 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/coding_standards.md @@ -0,0 +1 @@ +Please follow [General Coding Standards](/doc/coding_standards.md) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go new file mode 100644 index 0000000000..7010f868f1 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception_test.go @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "testing" +) + +func TestTApplicationException(t *testing.T) { + exc := NewTApplicationException(UNKNOWN_APPLICATION_EXCEPTION, "") + if exc.Error() != "" { + t.Fatalf("Expected empty string for exception but found '%s'", exc.Error()) + } + if exc.TypeId() != UNKNOWN_APPLICATION_EXCEPTION { + t.Fatalf("Expected type UNKNOWN for exception but found '%s'", exc.TypeId()) + } + exc = NewTApplicationException(WRONG_METHOD_NAME, "junk_method") + if exc.Error() != "junk_method" { + t.Fatalf("Expected 'junk_method' for exception but found '%s'", exc.Error()) + } + if exc.TypeId() != WRONG_METHOD_NAME { + t.Fatalf("Expected type WRONG_METHOD_NAME for exception but found '%s'", exc.TypeId()) + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol_test.go new file mode 100644 index 0000000000..0462cc79de --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol_test.go @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "testing" +) + +func TestReadWriteBinaryProtocol(t *testing.T) { + ReadWriteProtocolTest(t, NewTBinaryProtocolFactoryDefault()) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport_test.go new file mode 100644 index 0000000000..95ec0cbd2c --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport_test.go @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "testing" +) + +func TestBufferedTransport(t *testing.T) { + trans := NewTBufferedTransport(NewTMemoryBuffer(), 10240) + TransportTest(t, trans, trans) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol_test.go new file mode 100644 index 0000000000..72812f9cb0 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol_test.go @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bytes" + "testing" +) + +func TestReadWriteCompactProtocol(t *testing.T) { + ReadWriteProtocolTest(t, NewTCompactProtocolFactory()) + transports := []TTransport{ + NewTMemoryBuffer(), + NewStreamTransportRW(bytes.NewBuffer(make([]byte, 0, 16384))), + NewTFramedTransport(NewTMemoryBuffer()), + } + for _, trans := range transports { + // A fresh protocol instance is created for each primitive round trip. + p := NewTCompactProtocol(trans) + ReadWriteBool(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteByte(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteI16(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteI32(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteI64(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteDouble(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteString(t, p, trans) + p = NewTCompactProtocol(trans) + ReadWriteBinary(t, p, trans) + trans.Close() + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception_test.go new file mode 100644 index 0000000000..71f5e2c7e7 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/exception_test.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package thrift + +import ( + "errors" + "testing" +) + +func TestPrependError(t *testing.T) { + err := NewTApplicationException(INTERNAL_ERROR, "original error") + err2, ok := PrependError("Prepend: ", err).(TApplicationException) + if !ok { + t.Fatal("Couldn't cast error TApplicationException") + } + if err2.Error() != "Prepend: original error" { + t.Fatal("Unexpected error string") + } + if err2.TypeId() != INTERNAL_ERROR { + t.Fatal("Unexpected type error") + } + + err3 := NewTProtocolExceptionWithType(INVALID_DATA, errors.New("original error")) + err4, ok := PrependError("Prepend: ", err3).(TProtocolException) + if !ok { + t.Fatal("Couldn't cast error TProtocolException") + } + if err4.Error() != "Prepend: original error" { + t.Fatal("Unexpected error string") + } + if err4.TypeId() != INVALID_DATA { + t.Fatal("Unexpected type error") + } + + err5 := NewTTransportException(TIMED_OUT, "original error") + err6, ok := PrependError("Prepend: ", err5).(TTransportException) + if !ok { + t.Fatal("Couldn't cast error TTransportException") + } + if err6.Error() != "Prepend: original error" { + t.Fatal("Unexpected error string") + } + if err6.TypeId() != TIMED_OUT { + t.Fatal("Unexpected type error") + } + + err7 := errors.New("original error") + err8 := PrependError("Prepend: ", err7) + if err8.Error() != "Prepend: original error" { + t.Fatal("Unexpected error string") + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport_test.go new file mode 100644 index 0000000000..8f683ef306 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport_test.go @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "testing" +) + +func TestFramedTransport(t *testing.T) { + trans := NewTFramedTransport(NewTMemoryBuffer()) + TransportTest(t, trans, trans) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_client_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_client_test.go new file mode 100644 index 0000000000..453680ace8 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_client_test.go @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net/http" + "testing" +) + +func TestHttpClient(t *testing.T) { + l, addr := HttpClientSetupForTest(t) + if l != nil { + defer l.Close() + } + trans, err := NewTHttpPostClient("http://" + addr.String()) + if err != nil { + l.Close() + t.Fatalf("Unable to connect to %s: %s", addr.String(), err) + } + TransportTest(t, trans, trans) +} + +func TestHttpClientHeaders(t *testing.T) { + l, addr := HttpClientSetupForTest(t) + if l != nil { + defer l.Close() + } + trans, err := NewTHttpPostClient("http://" + addr.String()) + if err != nil { + l.Close() + t.Fatalf("Unable to connect to %s: %s", addr.String(), err) + } + TransportHeaderTest(t, trans, trans) +} + +func TestHttpCustomClient(t *testing.T) { + l, addr := HttpClientSetupForTest(t) + if l != nil { + defer l.Close() + } + + httpTransport := &customHttpTransport{} + + trans, err := NewTHttpPostClientWithOptions("http://"+addr.String(), THttpClientOptions{ + Client: &http.Client{ + Transport: httpTransport, + }, + }) + if err != nil { + l.Close() + t.Fatalf("Unable to connect to %s: %s", addr.String(), err) + } + TransportHeaderTest(t, trans, trans) + + if !httpTransport.hit { + t.Fatalf("Custom client was not used") + } +} + +func TestHttpCustomClientPackageScope(t *testing.T) { + l, addr := HttpClientSetupForTest(t) + if l != nil { + defer l.Close() + } + httpTransport := &customHttpTransport{} + DefaultHttpClient = &http.Client{ + Transport: httpTransport, + } + + trans, err := NewTHttpPostClient("http://" + addr.String()) + if err != nil { + l.Close() + t.Fatalf("Unable to connect to %s: %s", addr.String(), err) + } + TransportHeaderTest(t, trans, trans) + + if !httpTransport.hit { + t.Fatalf("Custom client was not used") + } +} + +type customHttpTransport struct { + hit bool +} + +func (c *customHttpTransport) RoundTrip(req *http.Request) (*http.Response, error) { + c.hit = true + return http.DefaultTransport.RoundTrip(req) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport_test.go new file mode 100644 index 0000000000..15a6116427 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport_test.go @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bytes" + "testing" +) + +func TestStreamTransport(t *testing.T) { + trans := NewStreamTransportRW(bytes.NewBuffer(make([]byte, 0, 1024))) + TransportTest(t, trans, trans) +} + +func TestStreamTransportOpenClose(t *testing.T) { + trans := NewStreamTransportRW(bytes.NewBuffer(make([]byte, 0, 1024))) + if !trans.IsOpen() { + t.Fatal("StreamTransport should be already open") + } + if trans.Open() == nil { + t.Fatal("StreamTransport should return error when open twice") + } + if trans.Close() != nil { + t.Fatal("StreamTransport should not return error when closing open transport") + } + if trans.IsOpen() { + t.Fatal("StreamTransport should not be open after close") + } + if trans.Close() == nil { + t.Fatal("StreamTransport should return error when closing a non open transport") + } + if trans.Open() == nil { + t.Fatal("StreamTransport should not be able to reopen") + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go new file mode 100644 index 0000000000..7104ce3a02 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol_test.go @@ -0,0 +1,649 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math" + "strconv" + "testing" +) + +func TestWriteJSONProtocolBool(t *testing.T) { + thetype := "boolean" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range BOOL_VALUES { + if e := p.WriteBool(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + expected := "" + if value { + expected = "1" + } else { + expected = "0" + } + if s != expected { + t.Fatalf("Bad value for %s %v: %s expected", thetype, value, s) + } + v := -1 + if err := json.Unmarshal([]byte(s), &v); err != nil || (v != 0) != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolBool(t *testing.T) { + thetype := "boolean" + for _, value := range BOOL_VALUES { + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + if value { + trans.Write([]byte{'1'}) // not JSON_TRUE + } else { + trans.Write([]byte{'0'}) // not JSON_FALSE + } + trans.Flush() + s := trans.String() + v, e := p.ReadBool() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + vv := -1 + if err := json.Unmarshal([]byte(s), &vv); err != nil || (vv != 0) != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, vv) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolByte(t *testing.T) { + thetype := "byte" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range BYTE_VALUES { + if e := p.WriteByte(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int8(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolByte(t *testing.T) { + thetype := "byte" + for _, value := range BYTE_VALUES { + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + trans.WriteString(strconv.Itoa(int(value))) + trans.Flush() + s := trans.String() + v, e := p.ReadByte() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolI16(t *testing.T) { + thetype := "int16" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range INT16_VALUES { + if e := p.WriteI16(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := 
p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int16(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolI16(t *testing.T) { + thetype := "int16" + for _, value := range INT16_VALUES { + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + trans.WriteString(strconv.Itoa(int(value))) + trans.Flush() + s := trans.String() + v, e := p.ReadI16() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolI32(t *testing.T) { + thetype := "int32" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range INT32_VALUES { + if e := p.WriteI32(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int32(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolI32(t *testing.T) { + thetype := "int32" + for _, value := range INT32_VALUES { + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + trans.WriteString(strconv.Itoa(int(value))) + trans.Flush() + s := trans.String() + v, e := p.ReadI32() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolI64(t *testing.T) { + thetype := "int64" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range INT64_VALUES { + if e := p.WriteI64(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int64(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolI64(t *testing.T) { + thetype := "int64" + for _, value := range INT64_VALUES { + trans := NewTMemoryBuffer() + p := 
NewTJSONProtocol(trans) + trans.WriteString(strconv.FormatInt(value, 10)) + trans.Flush() + s := trans.String() + v, e := p.ReadI64() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolDouble(t *testing.T) { + thetype := "double" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range DOUBLE_VALUES { + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if math.IsInf(value, 1) { + if s != jsonQuote(JSON_INFINITY) { + t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_INFINITY)) + } + } else if math.IsInf(value, -1) { + if s != jsonQuote(JSON_NEGATIVE_INFINITY) { + t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_NEGATIVE_INFINITY)) + } + } else if math.IsNaN(value) { + if s != jsonQuote(JSON_NAN) { + t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_NAN)) + } + } else { + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := float64(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolDouble(t *testing.T) { + thetype := "double" + for _, value := range DOUBLE_VALUES { + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + n := NewNumericFromDouble(value) + trans.WriteString(n.String()) + trans.Flush() + s := trans.String() + v, e := p.ReadDouble() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if math.IsInf(value, 1) { + if !math.IsInf(v, 1) { + t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v) + } + } else if math.IsInf(value, -1) { + if !math.IsInf(v, -1) { + t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v) + } + } else if math.IsNaN(value) { + if !math.IsNaN(v) { + t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v) + } + } else { + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolString(t *testing.T) { + thetype := "string" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + for _, value := range STRING_VALUES { + if e := p.WriteString(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s[0] != '"' || s[len(s)-1] 
!= '"' { + t.Fatalf("Bad value for %s '%v', wrote '%v', expected: %v", thetype, value, s, fmt.Sprint("\"", value, "\"")) + } + v := new(string) + if err := json.Unmarshal([]byte(s), v); err != nil || *v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadJSONProtocolString(t *testing.T) { + thetype := "string" + for _, value := range STRING_VALUES { + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + trans.WriteString(jsonQuote(value)) + trans.Flush() + s := trans.String() + v, e := p.ReadString() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + v1 := new(string) + if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v1) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteJSONProtocolBinary(t *testing.T) { + thetype := "binary" + value := protocol_bdata + b64value := make([]byte, base64.StdEncoding.EncodedLen(len(protocol_bdata))) + base64.StdEncoding.Encode(b64value, value) + b64String := string(b64value) + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + if e := p.WriteBinary(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + expectedString := fmt.Sprint("\"", b64String, "\"") + if s != expectedString { + t.Fatalf("Bad value for %s %v\n wrote: \"%v\"\nexpected: \"%v\"", thetype, value, s, expectedString) + } + v1, err := p.ReadBinary() + if err != nil { + t.Fatalf("Unable to read binary: %s", err.Error()) + } + if len(v1) != len(value) { + t.Fatalf("Invalid value for binary\nexpected: \"%v\"\n read: \"%v\"", value, v1) + } + for k, v := range value { + if v1[k] != v { + t.Fatalf("Invalid value for binary at %v\nexpected: \"%v\"\n read: \"%v\"", k, v, v1[k]) + } + } + trans.Close() +} + +func TestReadJSONProtocolBinary(t *testing.T) { + thetype := "binary" + value := protocol_bdata + b64value := make([]byte, base64.StdEncoding.EncodedLen(len(protocol_bdata))) + base64.StdEncoding.Encode(b64value, value) + b64String := string(b64value) + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + trans.WriteString(jsonQuote(b64String)) + trans.Flush() + s := trans.String() + v, e := p.ReadBinary() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if len(v) != len(value) { + t.Fatalf("Bad value for %s value length %v, wrote: %v, received length: %v", thetype, len(value), s, len(v)) + } + for i := 0; i < len(v); i++ { + if v[i] != value[i] { + t.Fatalf("Bad value for %s at index %d value %v, wrote: %v, received: %v", thetype, i, value[i], s, v[i]) + } + } + v1 := new(string) + if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != b64String { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v1) + } + trans.Reset() + trans.Close() +} + +func TestWriteJSONProtocolList(t *testing.T) { + thetype := "list" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + p.WriteListBegin(TType(DOUBLE), len(DOUBLE_VALUES)) + for _, value := range DOUBLE_VALUES { 
+ if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + } + p.WriteListEnd() + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) + } + str := trans.String() + str1 := new([]interface{}) + err := json.Unmarshal([]byte(str), str1) + if err != nil { + t.Fatalf("Unable to decode %s, wrote: %s", thetype, str) + } + l := *str1 + if len(l) < 2 { + t.Fatalf("List must be at least of length two to include metadata") + } + if l[0] != "dbl" { + t.Fatal("Invalid type for list, expected: ", DOUBLE, ", but was: ", l[0]) + } + if int(l[1].(float64)) != len(DOUBLE_VALUES) { + t.Fatal("Invalid length for list, expected: ", len(DOUBLE_VALUES), ", but was: ", l[1]) + } + for k, value := range DOUBLE_VALUES { + s := l[k+2] + if math.IsInf(value, 1) { + if s.(string) != JSON_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_INFINITY), str) + } + } else if math.IsInf(value, 0) { + if s.(string) != JSON_NEGATIVE_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY), str) + } + } else if math.IsNaN(value) { + if s.(string) != JSON_NAN { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NAN), str) + } + } else { + if s.(float64) != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s'", thetype, value, s) + } + } + trans.Reset() + } + trans.Close() +} + +func TestWriteJSONProtocolSet(t *testing.T) { + thetype := "set" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + p.WriteSetBegin(TType(DOUBLE), len(DOUBLE_VALUES)) + for _, value := range DOUBLE_VALUES { + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + } + p.WriteSetEnd() + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) + } + str := trans.String() + str1 := new([]interface{}) + err := json.Unmarshal([]byte(str), str1) + if err != nil { + t.Fatalf("Unable to decode %s, wrote: %s", thetype, str) + } + l := *str1 + if len(l) < 2 { + t.Fatalf("Set must be at least of length two to include metadata") + } + if l[0] != "dbl" { + t.Fatal("Invalid type for set, expected: ", DOUBLE, ", but was: ", l[0]) + } + if int(l[1].(float64)) != len(DOUBLE_VALUES) { + t.Fatal("Invalid length for set, expected: ", len(DOUBLE_VALUES), ", but was: ", l[1]) + } + for k, value := range DOUBLE_VALUES { + s := l[k+2] + if math.IsInf(value, 1) { + if s.(string) != JSON_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_INFINITY), str) + } + } else if math.IsInf(value, 0) { + if s.(string) != JSON_NEGATIVE_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY), str) + } + } else if math.IsNaN(value) { + if s.(string) != JSON_NAN { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NAN), str) + } + } else { + if s.(float64) != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s'", thetype, value, s) + } + } + trans.Reset() + } +
trans.Close() +} + +func TestWriteJSONProtocolMap(t *testing.T) { + thetype := "map" + trans := NewTMemoryBuffer() + p := NewTJSONProtocol(trans) + p.WriteMapBegin(TType(I32), TType(DOUBLE), len(DOUBLE_VALUES)) + for k, value := range DOUBLE_VALUES { + if e := p.WriteI32(int32(k)); e != nil { + t.Fatalf("Unable to write %s key int32 value %v due to error: %s", thetype, k, e.Error()) + } + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value float64 value %v due to error: %s", thetype, value, e.Error()) + } + } + p.WriteMapEnd() + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) + } + str := trans.String() + if str[0] != '[' || str[len(str)-1] != ']' { + t.Fatalf("Bad value for %s, wrote: %q, in go: %q", thetype, str, DOUBLE_VALUES) + } + expectedKeyType, expectedValueType, expectedSize, err := p.ReadMapBegin() + if err != nil { + t.Fatalf("Error while reading map begin: %s", err.Error()) + } + if expectedKeyType != I32 { + t.Fatal("Expected map key type ", I32, ", but was ", expectedKeyType) + } + if expectedValueType != DOUBLE { + t.Fatal("Expected map value type ", DOUBLE, ", but was ", expectedValueType) + } + if expectedSize != len(DOUBLE_VALUES) { + t.Fatal("Expected map size of ", len(DOUBLE_VALUES), ", but was ", expectedSize) + } + for k, value := range DOUBLE_VALUES { + ik, err := p.ReadI32() + if err != nil { + t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, ik, string(k), err.Error()) + } + if int(ik) != k { + t.Fatalf("Bad key for %s index %v, wrote: %v, expected: %v", thetype, k, ik, k) + } + dv, err := p.ReadDouble() + if err != nil { + t.Fatalf("Bad value for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, dv, value, err.Error()) + } + s := strconv.FormatFloat(dv, 'g', 10, 64) + if math.IsInf(value, 1) { + if !math.IsInf(dv, 1) { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_INFINITY)) + } + } else if math.IsInf(value, 0) { + if !math.IsInf(dv, 0) { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY)) + } + } else if math.IsNaN(value) { + if !math.IsNaN(dv) { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_NAN)) + } + } else { + expected := strconv.FormatFloat(value, 'g', 10, 64) + if s != expected { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected %v", thetype, k, value, s, expected) + } + v := float64(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + } + } + err = p.ReadMapEnd() + if err != nil { + t.Fatalf("Error while reading map end: %s", err.Error()) + } + trans.Close() +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/lowlevel_benchmarks_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/lowlevel_benchmarks_test.go new file mode 100644 index 0000000000..a5094ae97c --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/lowlevel_benchmarks_test.go @@ -0,0 +1,396 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "testing" +) + +var binaryProtoF = NewTBinaryProtocolFactoryDefault() +var compactProtoF = NewTCompactProtocolFactory() + +var buf = bytes.NewBuffer(make([]byte, 0, 1024)) + +var tfv = []TTransportFactory{ + NewTMemoryBufferTransportFactory(1024), + NewStreamTransportFactory(buf, buf, true), + NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024)), +} + +func BenchmarkBinaryBool_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBool(b, p, trans) + } +} + +func BenchmarkBinaryByte_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteByte(b, p, trans) + } +} + +func BenchmarkBinaryI16_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI16(b, p, trans) + } +} + +func BenchmarkBinaryI32_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI32(b, p, trans) + } +} +func BenchmarkBinaryI64_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI64(b, p, trans) + } +} +func BenchmarkBinaryDouble_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteDouble(b, p, trans) + } +} +func BenchmarkBinaryString_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteString(b, p, trans) + } +} +func BenchmarkBinaryBinary_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBinary(b, p, trans) + } +} + +func BenchmarkBinaryBool_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBool(b, p, trans) + } +} + +func BenchmarkBinaryByte_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteByte(b, p, trans) + } +} + +func BenchmarkBinaryI16_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI16(b, p, trans) + } +} + +func BenchmarkBinaryI32_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI32(b, p, trans) + } +} +func BenchmarkBinaryI64_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI64(b, p, trans) + } +} +func BenchmarkBinaryDouble_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < 
b.N; i++ { + ReadWriteDouble(b, p, trans) + } +} +func BenchmarkBinaryString_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteString(b, p, trans) + } +} +func BenchmarkBinaryBinary_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBinary(b, p, trans) + } +} + +func BenchmarkBinaryBool_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBool(b, p, trans) + } +} + +func BenchmarkBinaryByte_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteByte(b, p, trans) + } +} + +func BenchmarkBinaryI16_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI16(b, p, trans) + } +} + +func BenchmarkBinaryI32_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI32(b, p, trans) + } +} +func BenchmarkBinaryI64_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI64(b, p, trans) + } +} +func BenchmarkBinaryDouble_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteDouble(b, p, trans) + } +} +func BenchmarkBinaryString_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteString(b, p, trans) + } +} +func BenchmarkBinaryBinary_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := binaryProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBinary(b, p, trans) + } +} + +func BenchmarkCompactBool_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBool(b, p, trans) + } +} + +func BenchmarkCompactByte_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteByte(b, p, trans) + } +} + +func BenchmarkCompactI16_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI16(b, p, trans) + } +} + +func BenchmarkCompactI32_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI32(b, p, trans) + } +} +func BenchmarkCompactI64_0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI64(b, p, trans) + } +} +func BenchmarkCompactDouble0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteDouble(b, p, trans) + } +} +func BenchmarkCompactString0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteString(b, p, trans) + } +} +func BenchmarkCompactBinary0(b *testing.B) { + trans := tfv[0].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBinary(b, p, trans) + } +} + +func BenchmarkCompactBool_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < 
b.N; i++ { + ReadWriteBool(b, p, trans) + } +} + +func BenchmarkCompactByte_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteByte(b, p, trans) + } +} + +func BenchmarkCompactI16_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI16(b, p, trans) + } +} + +func BenchmarkCompactI32_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI32(b, p, trans) + } +} +func BenchmarkCompactI64_1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI64(b, p, trans) + } +} +func BenchmarkCompactDouble1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteDouble(b, p, trans) + } +} +func BenchmarkCompactString1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteString(b, p, trans) + } +} +func BenchmarkCompactBinary1(b *testing.B) { + trans := tfv[1].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBinary(b, p, trans) + } +} + +func BenchmarkCompactBool_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBool(b, p, trans) + } +} + +func BenchmarkCompactByte_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteByte(b, p, trans) + } +} + +func BenchmarkCompactI16_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI16(b, p, trans) + } +} + +func BenchmarkCompactI32_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI32(b, p, trans) + } +} +func BenchmarkCompactI64_2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteI64(b, p, trans) + } +} +func BenchmarkCompactDouble2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteDouble(b, p, trans) + } +} +func BenchmarkCompactString2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteString(b, p, trans) + } +} +func BenchmarkCompactBinary2(b *testing.B) { + trans := tfv[2].GetTransport(nil) + p := compactProtoF.GetProtocol(trans) + for i := 0; i < b.N; i++ { + ReadWriteBinary(b, p, trans) + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer_test.go new file mode 100644 index 0000000000..af2e8bfe52 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer_test.go @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "testing" +) + +func TestMemoryBuffer(t *testing.T) { + trans := NewTMemoryBufferLen(1024) + TransportTest(t, trans, trans) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_test.go new file mode 100644 index 0000000000..613eae6bc8 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_test.go @@ -0,0 +1,479 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "io/ioutil" + "math" + "net" + "net/http" + "testing" +) + +const PROTOCOL_BINARY_DATA_SIZE = 155 + +var ( + data string // test data for writing + protocol_bdata []byte // test data for writing; same as data + BOOL_VALUES []bool + BYTE_VALUES []int8 + INT16_VALUES []int16 + INT32_VALUES []int32 + INT64_VALUES []int64 + DOUBLE_VALUES []float64 + STRING_VALUES []string +) + +func init() { + protocol_bdata = make([]byte, PROTOCOL_BINARY_DATA_SIZE) + for i := 0; i < PROTOCOL_BINARY_DATA_SIZE; i++ { + protocol_bdata[i] = byte((i + 'a') % 255) + } + data = string(protocol_bdata) + BOOL_VALUES = []bool{false, true, false, false, true} + BYTE_VALUES = []int8{117, 0, 1, 32, 127, -128, -1} + INT16_VALUES = []int16{459, 0, 1, -1, -128, 127, 32767, -32768} + INT32_VALUES = []int32{459, 0, 1, -1, -128, 127, 32767, 2147483647, -2147483535} + INT64_VALUES = []int64{459, 0, 1, -1, -128, 127, 32767, 2147483647, -2147483535, 34359738481, -35184372088719, -9223372036854775808, 9223372036854775807} + DOUBLE_VALUES = []float64{459.3, 0.0, -1.0, 1.0, 0.5, 0.3333, 3.14159, 1.537e-38, 1.673e25, 6.02214179e23, -6.02214179e23, INFINITY.Float64(), NEGATIVE_INFINITY.Float64(), NAN.Float64()} + STRING_VALUES = []string{"", "a", "st[uf]f", "st,u:ff with spaces", "stuff\twith\nescape\\characters'...\"lots{of}fun"} +} + +type HTTPEchoServer struct{} +type HTTPHeaderEchoServer struct{} + +func (p *HTTPEchoServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + buf, err := ioutil.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + w.Write(buf) + } else { + w.WriteHeader(http.StatusOK) + w.Write(buf) + } +} + +func (p *HTTPHeaderEchoServer) ServeHTTP(w http.ResponseWriter, req *http.Request) { + buf, err := ioutil.ReadAll(req.Body) + if err != nil { + 
w.WriteHeader(http.StatusBadRequest) + w.Write(buf) + } else { + w.WriteHeader(http.StatusOK) + w.Write(buf) + } +} + +func HttpClientSetupForTest(t *testing.T) (net.Listener, net.Addr) { + addr, err := FindAvailableTCPServerPort(40000) + if err != nil { + t.Fatalf("Unable to find available tcp port addr: %s", err) + return nil, addr + } + l, err := net.Listen(addr.Network(), addr.String()) + if err != nil { + t.Fatalf("Unable to setup tcp listener on %s: %s", addr.String(), err) + return l, addr + } + go http.Serve(l, &HTTPEchoServer{}) + return l, addr +} + +func HttpClientSetupForHeaderTest(t *testing.T) (net.Listener, net.Addr) { + addr, err := FindAvailableTCPServerPort(40000) + if err != nil { + t.Fatalf("Unable to find available tcp port addr: %s", err) + return nil, addr + } + l, err := net.Listen(addr.Network(), addr.String()) + if err != nil { + t.Fatalf("Unable to setup tcp listener on %s: %s", addr.String(), err) + return l, addr + } + go http.Serve(l, &HTTPHeaderEchoServer{}) + return l, addr +} + +func ReadWriteProtocolTest(t *testing.T, protocolFactory TProtocolFactory) { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + l, addr := HttpClientSetupForTest(t) + defer l.Close() + transports := []TTransportFactory{ + NewTMemoryBufferTransportFactory(1024), + NewStreamTransportFactory(buf, buf, true), + NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024)), + NewTHttpPostClientTransportFactory("http://" + addr.String()), + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteBool(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteByte(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteI16(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteI32(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteI64(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteDouble(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteString(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteBinary(t, p, trans) + trans.Close() + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + p := protocolFactory.GetProtocol(trans) + ReadWriteI64(t, p, trans) + ReadWriteDouble(t, p, trans) + ReadWriteBinary(t, p, trans) + ReadWriteByte(t, p, trans) + trans.Close() + } +} + +func ReadWriteBool(t testing.TB, p TProtocol, trans TTransport) { + thetype := TType(BOOL) + thelen := len(BOOL_VALUES) + err := p.WriteListBegin(thetype, thelen) + if err != nil { + t.Errorf("%s: %T %T %q Error writing list begin: %q", "ReadWriteBool", p, trans, err, thetype) + } + for k, v := range BOOL_VALUES { + err = p.WriteBool(v) + if err != nil { + t.Errorf("%s: %T %T %q Error writing bool in list at index %d: %q", "ReadWriteBool", p, trans, err, k, v) + } + } + err = p.WriteListEnd() + if err != nil { + t.Errorf("%s: %T %T %q Error writing list end: %q",
"ReadWriteBool", p, trans, err, BOOL_VALUES) + } + p.Flush() + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteBool", p, trans, err, BOOL_VALUES) + } + _, ok := p.(*TSimpleJSONProtocol) + if !ok { + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteBool", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %s != len %s", "ReadWriteBool", p, trans, thelen, thelen2) + } + } + for k, v := range BOOL_VALUES { + value, err := p.ReadBool() + if err != nil { + t.Errorf("%s: %T %T %q Error reading bool at index %d: %q", "ReadWriteBool", p, trans, err, k, v) + } + if v != value { + t.Errorf("%s: index %d %q %q %q != %q", "ReadWriteBool", k, p, trans, v, value) + } + } + err = p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteBool", p, trans, err) + } +} + +func ReadWriteByte(t testing.TB, p TProtocol, trans TTransport) { + thetype := TType(BYTE) + thelen := len(BYTE_VALUES) + err := p.WriteListBegin(thetype, thelen) + if err != nil { + t.Errorf("%s: %T %T %q Error writing list begin: %q", "ReadWriteByte", p, trans, err, thetype) + } + for k, v := range BYTE_VALUES { + err = p.WriteByte(v) + if err != nil { + t.Errorf("%s: %T %T %q Error writing byte in list at index %d: %q", "ReadWriteByte", p, trans, err, k, v) + } + } + err = p.WriteListEnd() + if err != nil { + t.Errorf("%s: %T %T %q Error writing list end: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES) + } + err = p.Flush() + if err != nil { + t.Errorf("%s: %T %T %q Error flushing list of bytes: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES) + } + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteByte", p, trans, err, BYTE_VALUES) + } + _, ok := p.(*TSimpleJSONProtocol) + if !ok { + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteByte", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %s != len %s", "ReadWriteByte", p, trans, thelen, thelen2) + } + } + for k, v := range BYTE_VALUES { + value, err := p.ReadByte() + if err != nil { + t.Errorf("%s: %T %T %q Error reading byte at index %d: %q", "ReadWriteByte", p, trans, err, k, v) + } + if v != value { + t.Errorf("%s: %T %T %d != %d", "ReadWriteByte", p, trans, v, value) + } + } + err = p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteByte", p, trans, err) + } +} + +func ReadWriteI16(t testing.TB, p TProtocol, trans TTransport) { + thetype := TType(I16) + thelen := len(INT16_VALUES) + p.WriteListBegin(thetype, thelen) + for _, v := range INT16_VALUES { + p.WriteI16(v) + } + p.WriteListEnd() + p.Flush() + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI16", p, trans, err, INT16_VALUES) + } + _, ok := p.(*TSimpleJSONProtocol) + if !ok { + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI16", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %s != len %s", "ReadWriteI16", p, trans, thelen, thelen2) + } + } + for k, v := range INT16_VALUES { + value, err := p.ReadI16() + if err != nil { + t.Errorf("%s: %T %T %q Error reading int16 at index %d: %q", "ReadWriteI16", p, trans, err, k, v) + } + if v != value { + t.Errorf("%s: %T %T %d != %d", "ReadWriteI16", p, trans, v, value) + } + } + err = 
p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteI16", p, trans, err) + } +} + +func ReadWriteI32(t testing.TB, p TProtocol, trans TTransport) { + thetype := TType(I32) + thelen := len(INT32_VALUES) + p.WriteListBegin(thetype, thelen) + for _, v := range INT32_VALUES { + p.WriteI32(v) + } + p.WriteListEnd() + p.Flush() + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI32", p, trans, err, INT32_VALUES) + } + _, ok := p.(*TSimpleJSONProtocol) + if !ok { + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI32", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %d != len %d", "ReadWriteI32", p, trans, thelen, thelen2) + } + } + for k, v := range INT32_VALUES { + value, err := p.ReadI32() + if err != nil { + t.Errorf("%s: %T %T %q Error reading int32 at index %d: %q", "ReadWriteI32", p, trans, err, k, v) + } + if v != value { + t.Errorf("%s: %T %T %d != %d", "ReadWriteI32", p, trans, v, value) + } + } + err = p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteI32", p, trans, err) + } +} + +func ReadWriteI64(t testing.TB, p TProtocol, trans TTransport) { + thetype := TType(I64) + thelen := len(INT64_VALUES) + p.WriteListBegin(thetype, thelen) + for _, v := range INT64_VALUES { + p.WriteI64(v) + } + p.WriteListEnd() + p.Flush() + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteI64", p, trans, err, INT64_VALUES) + } + _, ok := p.(*TSimpleJSONProtocol) + if !ok { + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteI64", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %d != len %d", "ReadWriteI64", p, trans, thelen, thelen2) + } + } + for k, v := range INT64_VALUES { + value, err := p.ReadI64() + if err != nil { + t.Errorf("%s: %T %T %q Error reading int64 at index %d: %q", "ReadWriteI64", p, trans, err, k, v) + } + if v != value { + t.Errorf("%s: %T %T %d != %d", "ReadWriteI64", p, trans, v, value) + } + } + err = p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteI64", p, trans, err) + } +} + +func ReadWriteDouble(t testing.TB, p TProtocol, trans TTransport) { + thetype := TType(DOUBLE) + thelen := len(DOUBLE_VALUES) + p.WriteListBegin(thetype, thelen) + for _, v := range DOUBLE_VALUES { + p.WriteDouble(v) + } + p.WriteListEnd() + p.Flush() + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteDouble", p, trans, err, DOUBLE_VALUES) + } + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteDouble", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %d != len %d", "ReadWriteDouble", p, trans, thelen, thelen2) + } + for k, v := range DOUBLE_VALUES { + value, err := p.ReadDouble() + if err != nil { + t.Errorf("%s: %T %T %q Error reading double at index %d: %q", "ReadWriteDouble", p, trans, err, k, v) + } + if math.IsNaN(v) { + if !math.IsNaN(value) { + t.Errorf("%s: %T %T math.IsNaN(%v) != math.IsNaN(%v)", "ReadWriteDouble", p, trans, v, value) + } + } else if v != value { + t.Errorf("%s: %T %T %v != %v", "ReadWriteDouble", p, trans, v, value) + } + } + err = p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteDouble", p, trans, err) + } +} + +func ReadWriteString(t
testing.TB, p TProtocol, trans TTransport) { + thetype := TType(STRING) + thelen := len(STRING_VALUES) + p.WriteListBegin(thetype, thelen) + for _, v := range STRING_VALUES { + p.WriteString(v) + } + p.WriteListEnd() + p.Flush() + thetype2, thelen2, err := p.ReadListBegin() + if err != nil { + t.Errorf("%s: %T %T %q Error reading list: %q", "ReadWriteString", p, trans, err, STRING_VALUES) + } + _, ok := p.(*TSimpleJSONProtocol) + if !ok { + if thetype != thetype2 { + t.Errorf("%s: %T %T type %s != type %s", "ReadWriteString", p, trans, thetype, thetype2) + } + if thelen != thelen2 { + t.Errorf("%s: %T %T len %d != len %d", "ReadWriteString", p, trans, thelen, thelen2) + } + } + for k, v := range STRING_VALUES { + value, err := p.ReadString() + if err != nil { + t.Errorf("%s: %T %T %q Error reading string at index %d: %q", "ReadWriteString", p, trans, err, k, v) + } + if v != value { + t.Errorf("%s: %T %T %q != %q", "ReadWriteString", p, trans, v, value) + } + } + err = p.ReadListEnd() + if err != nil { + t.Errorf("%s: %T %T Unable to read list end: %q", "ReadWriteString", p, trans, err) + } +} + +func ReadWriteBinary(t testing.TB, p TProtocol, trans TTransport) { + v := protocol_bdata + p.WriteBinary(v) + p.Flush() + value, err := p.ReadBinary() + if err != nil { + t.Errorf("%s: %T %T Unable to read binary: %s", "ReadWriteBinary", p, trans, err.Error()) + } + if len(v) != len(value) { + t.Errorf("%s: %T %T len(v) != len(value)... %d != %d", "ReadWriteBinary", p, trans, len(v), len(value)) + } else { + for i := 0; i < len(v); i++ { + if v[i] != value[i] { + t.Errorf("%s: %T %T %s != %s", "ReadWriteBinary", p, trans, v, value) + } + } + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport_test.go new file mode 100644 index 0000000000..41513f812b --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport_test.go @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "errors" + "io" + "reflect" + "testing" +) + +func TestEnsureTransportsAreRich(t *testing.T) { + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + + transports := []TTransportFactory{ + NewTMemoryBufferTransportFactory(1024), + NewStreamTransportFactory(buf, buf, true), + NewTFramedTransportFactory(NewTMemoryBufferTransportFactory(1024)), + NewTHttpPostClientTransportFactory("http://127.0.0.1"), + } + for _, tf := range transports { + trans := tf.GetTransport(nil) + _, ok := trans.(TRichTransport) + if !ok { + t.Errorf("Transport %s does not implement TRichTransport interface", reflect.TypeOf(trans)) + } + } +} + +// TestReadByte tests whether readByte handles error cases correctly.
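+// A minimal sketch (not part of upstream thrift) of the contract that the
+// readByteTests table below pins down for the unexported readByte helper,
+// assuming only plain io.Reader semantics; readByteSketch is a hypothetical
+// name used for illustration:
+//
+//	func readByteSketch(r io.Reader) (byte, error) {
+//		var buf [1]byte
+//		n, err := r.Read(buf[:])
+//		if n > 0 && err == io.EOF {
+//			return buf[0], nil // a delivered byte cancels a simultaneous io.EOF
+//		}
+//		if n > 0 {
+//			return buf[0], err // byte plus a non-EOF error: both are reported
+//		}
+//		return 0, err // no byte read: pass the error (possibly io.EOF) through
+//	}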
+func TestReadByte(t *testing.T) { + for i, test := range readByteTests { + v, err := readByte(test.r) + if v != test.v { + t.Fatalf("TestReadByte %d: value differs. Expected %d, got %d", i, test.v, v) + } + if err != test.err { + t.Fatalf("TestReadByte %d: error differs. Expected %s, got %s", i, test.err, err) + } + } +} + +var someError = errors.New("Some error") +var readByteTests = []struct { + r *mockReader + v byte + err error +}{ + {&mockReader{0, 55, io.EOF}, 0, io.EOF}, // reader sends EOF w/o data + {&mockReader{0, 55, someError}, 0, someError}, // reader sends some other error + {&mockReader{1, 55, nil}, 55, nil}, // reader sends data w/o error + {&mockReader{1, 55, io.EOF}, 55, nil}, // reader sends data with EOF + {&mockReader{1, 55, someError}, 55, someError}, // reader sends data with some error +} + +type mockReader struct { + n int + v byte + err error +} + +func (r *mockReader) Read(p []byte) (n int, err error) { + if r.n > 0 { + p[0] = r.v + } + return r.n, r.err +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer_test.go new file mode 100644 index 0000000000..06d27a16b7 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/serializer_test.go @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ + +package thrift + +import ( + "errors" + "fmt" + "testing" +) + +type ProtocolFactory interface { + GetProtocol(t TTransport) TProtocol +} + +func compareStructs(m, m1 MyTestStruct) (bool, error) { + switch { + case m.On != m1.On: + return false, errors.New("Boolean not equal") + case m.B != m1.B: + return false, errors.New("Byte not equal") + case m.Int16 != m1.Int16: + return false, errors.New("Int16 not equal") + case m.Int32 != m1.Int32: + return false, errors.New("Int32 not equal") + case m.Int64 != m1.Int64: + return false, errors.New("Int64 not equal") + case m.D != m1.D: + return false, errors.New("Double not equal") + case m.St != m1.St: + return false, errors.New("String not equal") + + case len(m.Bin) != len(m1.Bin): + return false, errors.New("Binary size not equal") + case len(m.StringMap) != len(m1.StringMap): + return false, errors.New("StringMap size not equal") + case len(m.StringList) != len(m1.StringList): + return false, errors.New("StringList size not equal") + case len(m.StringSet) != len(m1.StringSet): + return false, errors.New("StringSet size not equal") + + case m.E != m1.E: + return false, errors.New("MyTestEnum not equal") + } + for i := range m.Bin { + if m.Bin[i] != m1.Bin[i] { + return false, errors.New("Binary not equal") + } + } + return true, nil +} + +func ProtocolTest1(test *testing.T, pf ProtocolFactory) (bool, error) { + t := NewTSerializer() + t.Protocol = pf.GetProtocol(t.Transport) + var m = MyTestStruct{} + m.On = true + m.B = int8(0) + m.Int16 = 1 + m.Int32 = 2 + m.Int64 = 3 + m.D = 4.1 + m.St = "Test" + m.Bin = make([]byte, 10) + m.StringMap = make(map[string]string, 5) + m.StringList = make([]string, 5) + m.StringSet = make(map[string]struct{}, 5) + m.E = 2 + + s, err := t.WriteString(&m) + if err != nil { + return false, fmt.Errorf("Unable to Serialize struct\n\t %s", err) + } + + t1 := NewTDeserializer() + t1.Protocol = pf.GetProtocol(t1.Transport) + var m1 = MyTestStruct{} + if err = t1.ReadString(&m1, s); err != nil { + return false, fmt.Errorf("Unable to Deserialize struct\n\t %s", err) + } + + return compareStructs(m, m1) +} + +func ProtocolTest2(test *testing.T, pf ProtocolFactory) (bool, error) { + t := NewTSerializer() + t.Protocol = pf.GetProtocol(t.Transport) + var m = MyTestStruct{} + m.On = false + m.B = int8(0) + m.Int16 = 1 + m.Int32 = 2 + m.Int64 = 3 + m.D = 4.1 + m.St = "Test" + m.Bin = make([]byte, 10) + m.StringMap = make(map[string]string, 5) + m.StringList = make([]string, 5) + m.StringSet = make(map[string]struct{}, 5) + m.E = 2 + + s, err := t.WriteString(&m) + if err != nil { + return false, fmt.Errorf("Unable to Serialize struct\n\t %s", err) + } + + t1 := NewTDeserializer() + t1.Protocol = pf.GetProtocol(t1.Transport) + var m1 = MyTestStruct{} + if err = t1.ReadString(&m1, s); err != nil { + return false, fmt.Errorf("Unable to Deserialize struct\n\t %s", err) + } + + return compareStructs(m, m1) +} + +func TestSerializer(t *testing.T) { + + var protocol_factories map[string]ProtocolFactory + protocol_factories = make(map[string]ProtocolFactory) + protocol_factories["Binary"] = NewTBinaryProtocolFactoryDefault() + protocol_factories["Compact"] = NewTCompactProtocolFactory() + //protocol_factories["SimpleJSON"] = NewTSimpleJSONProtocolFactory() - write only, can't be read back by design + protocol_factories["JSON"] = NewTJSONProtocolFactory() + + var tests map[string]func(*testing.T,
ProtocolFactory) (bool, error) + tests = make(map[string]func(*testing.T, ProtocolFactory) (bool, error)) + tests["Test 1"] = ProtocolTest1 + tests["Test 2"] = ProtocolTest2 + //tests["Test 3"] = ProtocolTest3 // Example of how to add additional tests + + for name, pf := range protocol_factories { + + for test, f := range tests { + + if s, err := f(t, pf); !s || err != nil { + t.Errorf("%s Failed for %s protocol\n\t %s", test, name, err) + } + + } + } + +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer_types_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer_types_test.go new file mode 100644 index 0000000000..38ab8d6d60 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/serializer_types_test.go @@ -0,0 +1,633 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +/* THE FOLLOWING THRIFT FILE WAS USED TO CREATE THIS + +enum MyTestEnum { + FIRST = 1, + SECOND = 2, + THIRD = 3, + FOURTH = 4, +} + +struct MyTestStruct { + 1: bool on, + 2: byte b, + 3: i16 int16, + 4: i32 int32, + 5: i64 int64, + 6: double d, + 7: string st, + 8: binary bin, + 9: map<string,string> stringMap, + 10: list<string> stringList, + 11: set<string> stringSet, + 12: MyTestEnum e, +} +*/ + +import ( + "fmt" +) + +// (needed to ensure safety because of naive import list construction.)
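+// A minimal usage sketch (not part of the generated file) of how the
+// MyTestStruct defined below round-trips through the TSerializer and
+// TDeserializer exercised in serializer_test.go above; roundTripSketch is a
+// hypothetical name, and NewTSerializer is assumed to default to a memory
+// buffer with the binary protocol:
+//
+//	func roundTripSketch() (MyTestStruct, error) {
+//		in := MyTestStruct{St: "hello", Int32: 42}
+//		s := NewTSerializer()
+//		payload, err := s.WriteString(&in) // struct -> wire bytes as a string
+//		if err != nil {
+//			return MyTestStruct{}, err
+//		}
+//		var out MyTestStruct
+//		d := NewTDeserializer()
+//		return out, d.ReadString(&out, payload) // wire bytes -> struct
+//	}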
+var _ = ZERO +var _ = fmt.Printf + +var GoUnusedProtection__ int + +type MyTestEnum int64 + +const ( + MyTestEnum_FIRST MyTestEnum = 1 + MyTestEnum_SECOND MyTestEnum = 2 + MyTestEnum_THIRD MyTestEnum = 3 + MyTestEnum_FOURTH MyTestEnum = 4 +) + +func (p MyTestEnum) String() string { + switch p { + case MyTestEnum_FIRST: + return "FIRST" + case MyTestEnum_SECOND: + return "SECOND" + case MyTestEnum_THIRD: + return "THIRD" + case MyTestEnum_FOURTH: + return "FOURTH" + } + return "" +} + +func MyTestEnumFromString(s string) (MyTestEnum, error) { + switch s { + case "FIRST": + return MyTestEnum_FIRST, nil + case "SECOND": + return MyTestEnum_SECOND, nil + case "THIRD": + return MyTestEnum_THIRD, nil + case "FOURTH": + return MyTestEnum_FOURTH, nil + } + return MyTestEnum(0), fmt.Errorf("not a valid MyTestEnum string") +} + +func MyTestEnumPtr(v MyTestEnum) *MyTestEnum { return &v } + +type MyTestStruct struct { + On bool `thrift:"on,1" json:"on"` + B int8 `thrift:"b,2" json:"b"` + Int16 int16 `thrift:"int16,3" json:"int16"` + Int32 int32 `thrift:"int32,4" json:"int32"` + Int64 int64 `thrift:"int64,5" json:"int64"` + D float64 `thrift:"d,6" json:"d"` + St string `thrift:"st,7" json:"st"` + Bin []byte `thrift:"bin,8" json:"bin"` + StringMap map[string]string `thrift:"stringMap,9" json:"stringMap"` + StringList []string `thrift:"stringList,10" json:"stringList"` + StringSet map[string]struct{} `thrift:"stringSet,11" json:"stringSet"` + E MyTestEnum `thrift:"e,12" json:"e"` +} + +func NewMyTestStruct() *MyTestStruct { + return &MyTestStruct{} +} + +func (p *MyTestStruct) GetOn() bool { + return p.On +} + +func (p *MyTestStruct) GetB() int8 { + return p.B +} + +func (p *MyTestStruct) GetInt16() int16 { + return p.Int16 +} + +func (p *MyTestStruct) GetInt32() int32 { + return p.Int32 +} + +func (p *MyTestStruct) GetInt64() int64 { + return p.Int64 +} + +func (p *MyTestStruct) GetD() float64 { + return p.D +} + +func (p *MyTestStruct) GetSt() string { + return p.St +} + +func (p *MyTestStruct) GetBin() []byte { + return p.Bin +} + +func (p *MyTestStruct) GetStringMap() map[string]string { + return p.StringMap +} + +func (p *MyTestStruct) GetStringList() []string { + return p.StringList +} + +func (p *MyTestStruct) GetStringSet() map[string]struct{} { + return p.StringSet +} + +func (p *MyTestStruct) GetE() MyTestEnum { + return p.E +} +func (p *MyTestStruct) Read(iprot TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return PrependError(fmt.Sprintf("%T read error: ", p), err) + } + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == STOP { + break + } + switch fieldId { + case 1: + if err := p.readField1(iprot); err != nil { + return err + } + case 2: + if err := p.readField2(iprot); err != nil { + return err + } + case 3: + if err := p.readField3(iprot); err != nil { + return err + } + case 4: + if err := p.readField4(iprot); err != nil { + return err + } + case 5: + if err := p.readField5(iprot); err != nil { + return err + } + case 6: + if err := p.readField6(iprot); err != nil { + return err + } + case 7: + if err := p.readField7(iprot); err != nil { + return err + } + case 8: + if err := p.readField8(iprot); err != nil { + return err + } + case 9: + if err := p.readField9(iprot); err != nil { + return err + } + case 10: + if err := p.readField10(iprot); err != nil { + return err + } + case 11: + if err := p.readField11(iprot); err != nil { 
+ return err + } + case 12: + if err := p.readField12(iprot); err != nil { + return err + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *MyTestStruct) readField1(iprot TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return PrependError("error reading field 1: ", err) + } else { + p.On = v + } + return nil +} + +func (p *MyTestStruct) readField2(iprot TProtocol) error { + if v, err := iprot.ReadByte(); err != nil { + return PrependError("error reading field 2: ", err) + } else { + temp := int8(v) + p.B = temp + } + return nil +} + +func (p *MyTestStruct) readField3(iprot TProtocol) error { + if v, err := iprot.ReadI16(); err != nil { + return PrependError("error reading field 3: ", err) + } else { + p.Int16 = v + } + return nil +} + +func (p *MyTestStruct) readField4(iprot TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return PrependError("error reading field 4: ", err) + } else { + p.Int32 = v + } + return nil +} + +func (p *MyTestStruct) readField5(iprot TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return PrependError("error reading field 5: ", err) + } else { + p.Int64 = v + } + return nil +} + +func (p *MyTestStruct) readField6(iprot TProtocol) error { + if v, err := iprot.ReadDouble(); err != nil { + return PrependError("error reading field 6: ", err) + } else { + p.D = v + } + return nil +} + +func (p *MyTestStruct) readField7(iprot TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return PrependError("error reading field 7: ", err) + } else { + p.St = v + } + return nil +} + +func (p *MyTestStruct) readField8(iprot TProtocol) error { + if v, err := iprot.ReadBinary(); err != nil { + return PrependError("error reading field 8: ", err) + } else { + p.Bin = v + } + return nil +} + +func (p *MyTestStruct) readField9(iprot TProtocol) error { + _, _, size, err := iprot.ReadMapBegin() + if err != nil { + return PrependError("error reading map begin: ", err) + } + tMap := make(map[string]string, size) + p.StringMap = tMap + for i := 0; i < size; i++ { + var _key0 string + if v, err := iprot.ReadString(); err != nil { + return PrependError("error reading field 0: ", err) + } else { + _key0 = v + } + var _val1 string + if v, err := iprot.ReadString(); err != nil { + return PrependError("error reading field 0: ", err) + } else { + _val1 = v + } + p.StringMap[_key0] = _val1 + } + if err := iprot.ReadMapEnd(); err != nil { + return PrependError("error reading map end: ", err) + } + return nil +} + +func (p *MyTestStruct) readField10(iprot TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return PrependError("error reading list begin: ", err) + } + tSlice := make([]string, 0, size) + p.StringList = tSlice + for i := 0; i < size; i++ { + var _elem2 string + if v, err := iprot.ReadString(); err != nil { + return PrependError("error reading field 0: ", err) + } else { + _elem2 = v + } + p.StringList = append(p.StringList, _elem2) + } + if err := iprot.ReadListEnd(); err != nil { + return PrependError("error reading list end: ", err) + } + return nil +} + +func (p *MyTestStruct) readField11(iprot TProtocol) error { + _, size, err := iprot.ReadSetBegin() + if err != nil { + return PrependError("error reading set begin: ", err) + } + tSet := 
make(map[string]struct{}, size) + p.StringSet = tSet + for i := 0; i < size; i++ { + var _elem3 string + if v, err := iprot.ReadString(); err != nil { + return PrependError("error reading field 0: ", err) + } else { + _elem3 = v + } + p.StringSet[_elem3] = struct{}{} + } + if err := iprot.ReadSetEnd(); err != nil { + return PrependError("error reading set end: ", err) + } + return nil +} + +func (p *MyTestStruct) readField12(iprot TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return PrependError("error reading field 12: ", err) + } else { + temp := MyTestEnum(v) + p.E = temp + } + return nil +} + +func (p *MyTestStruct) Write(oprot TProtocol) error { + if err := oprot.WriteStructBegin("MyTestStruct"); err != nil { + return PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) + } + if err := p.writeField1(oprot); err != nil { + return err + } + if err := p.writeField2(oprot); err != nil { + return err + } + if err := p.writeField3(oprot); err != nil { + return err + } + if err := p.writeField4(oprot); err != nil { + return err + } + if err := p.writeField5(oprot); err != nil { + return err + } + if err := p.writeField6(oprot); err != nil { + return err + } + if err := p.writeField7(oprot); err != nil { + return err + } + if err := p.writeField8(oprot); err != nil { + return err + } + if err := p.writeField9(oprot); err != nil { + return err + } + if err := p.writeField10(oprot); err != nil { + return err + } + if err := p.writeField11(oprot); err != nil { + return err + } + if err := p.writeField12(oprot); err != nil { + return err + } + if err := oprot.WriteFieldStop(); err != nil { + return PrependError("write field stop error: ", err) + } + if err := oprot.WriteStructEnd(); err != nil { + return PrependError("write struct stop error: ", err) + } + return nil +} + +func (p *MyTestStruct) writeField1(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("on", BOOL, 1); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 1:on: ", p), err) + } + if err := oprot.WriteBool(bool(p.On)); err != nil { + return PrependError(fmt.Sprintf("%T.on (1) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 1:on: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField2(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("b", BYTE, 2); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 2:b: ", p), err) + } + if err := oprot.WriteByte(int8(p.B)); err != nil { + return PrependError(fmt.Sprintf("%T.b (2) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 2:b: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField3(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("int16", I16, 3); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 3:int16: ", p), err) + } + if err := oprot.WriteI16(int16(p.Int16)); err != nil { + return PrependError(fmt.Sprintf("%T.int16 (3) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 3:int16: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField4(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("int32", I32, 4); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 4:int32: ", p), err) + } + if err 
:= oprot.WriteI32(int32(p.Int32)); err != nil { + return PrependError(fmt.Sprintf("%T.int32 (4) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 4:int32: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField5(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("int64", I64, 5); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 5:int64: ", p), err) + } + if err := oprot.WriteI64(int64(p.Int64)); err != nil { + return PrependError(fmt.Sprintf("%T.int64 (5) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 5:int64: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField6(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("d", DOUBLE, 6); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 6:d: ", p), err) + } + if err := oprot.WriteDouble(float64(p.D)); err != nil { + return PrependError(fmt.Sprintf("%T.d (6) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 6:d: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField7(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("st", STRING, 7); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 7:st: ", p), err) + } + if err := oprot.WriteString(string(p.St)); err != nil { + return PrependError(fmt.Sprintf("%T.st (7) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 7:st: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField8(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("bin", STRING, 8); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 8:bin: ", p), err) + } + if err := oprot.WriteBinary(p.Bin); err != nil { + return PrependError(fmt.Sprintf("%T.bin (8) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 8:bin: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField9(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stringMap", MAP, 9); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 9:stringMap: ", p), err) + } + if err := oprot.WriteMapBegin(STRING, STRING, len(p.StringMap)); err != nil { + return PrependError("error writing map begin: ", err) + } + for k, v := range p.StringMap { + if err := oprot.WriteString(string(k)); err != nil { + return PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + if err := oprot.WriteString(string(v)); err != nil { + return PrependError(fmt.Sprintf("%T. 
(0) field write error: ", p), err) + } + } + if err := oprot.WriteMapEnd(); err != nil { + return PrependError("error writing map end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 9:stringMap: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField10(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stringList", LIST, 10); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 10:stringList: ", p), err) + } + if err := oprot.WriteListBegin(STRING, len(p.StringList)); err != nil { + return PrependError("error writing list begin: ", err) + } + for _, v := range p.StringList { + if err := oprot.WriteString(string(v)); err != nil { + return PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 10:stringList: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField11(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("stringSet", SET, 11); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 11:stringSet: ", p), err) + } + if err := oprot.WriteSetBegin(STRING, len(p.StringSet)); err != nil { + return PrependError("error writing set begin: ", err) + } + for v, _ := range p.StringSet { + if err := oprot.WriteString(string(v)); err != nil { + return PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err) + } + } + if err := oprot.WriteSetEnd(); err != nil { + return PrependError("error writing set end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 11:stringSet: ", p), err) + } + return err +} + +func (p *MyTestStruct) writeField12(oprot TProtocol) (err error) { + if err := oprot.WriteFieldBegin("e", I32, 12); err != nil { + return PrependError(fmt.Sprintf("%T write field begin error 12:e: ", p), err) + } + if err := oprot.WriteI32(int32(p.E)); err != nil { + return PrependError(fmt.Sprintf("%T.e (12) field write error: ", p), err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return PrependError(fmt.Sprintf("%T write field end error 12:e: ", p), err) + } + return err +} + +func (p *MyTestStruct) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("MyTestStruct(%+v)", *p) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket_test.go new file mode 100644 index 0000000000..f08e8e900d --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket_test.go @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "fmt" + "testing" +) + +func TestSocketIsntListeningAfterInterrupt(t *testing.T) { + host := "127.0.0.1" + port := 9090 + addr := fmt.Sprintf("%s:%d", host, port) + + socket := CreateServerSocket(t, addr) + socket.Listen() + socket.Interrupt() + + newSocket := CreateServerSocket(t, addr) + err := newSocket.Listen() + defer newSocket.Interrupt() + if err != nil { + t.Fatalf("Failed to rebind: %s", err) + } +} + +func CreateServerSocket(t *testing.T, addr string) *TServerSocket { + socket, err := NewTServerSocket(addr) + if err != nil { + t.Fatalf("Failed to create server socket: %s", err) + } + return socket +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_test.go new file mode 100644 index 0000000000..ffaf457027 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_test.go @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "testing" +) + +func TestNothing(t *testing.T) { + +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go new file mode 100644 index 0000000000..8f0dcc9dfe --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol_test.go @@ -0,0 +1,715 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
+ */ + +package thrift + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "math" + "strconv" + "strings" + "testing" +) + +func TestWriteSimpleJSONProtocolBool(t *testing.T) { + thetype := "boolean" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range BOOL_VALUES { + if e := p.WriteBool(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := false + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolBool(t *testing.T) { + thetype := "boolean" + for _, value := range BOOL_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + if value { + trans.Write(JSON_TRUE) + } else { + trans.Write(JSON_FALSE) + } + trans.Flush() + s := trans.String() + v, e := p.ReadBool() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteSimpleJSONProtocolByte(t *testing.T) { + thetype := "byte" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range BYTE_VALUES { + if e := p.WriteByte(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int8(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolByte(t *testing.T) { + thetype := "byte" + for _, value := range BYTE_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(strconv.Itoa(int(value))) + trans.Flush() + s := trans.String() + v, e := p.ReadByte() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteSimpleJSONProtocolI16(t *testing.T) { + thetype := "int16" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range INT16_VALUES { + if e := p.WriteI16(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error 
flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int16(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolI16(t *testing.T) { + thetype := "int16" + for _, value := range INT16_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(strconv.Itoa(int(value))) + trans.Flush() + s := trans.String() + v, e := p.ReadI16() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestWriteSimpleJSONProtocolI32(t *testing.T) { + thetype := "int32" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range INT32_VALUES { + if e := p.WriteI32(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int32(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolI32(t *testing.T) { + thetype := "int32" + for _, value := range INT32_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(strconv.Itoa(int(value))) + trans.Flush() + s := trans.String() + v, e := p.ReadI32() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestReadSimpleJSONProtocolI32Null(t *testing.T) { + thetype := "int32" + value := "null" + + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(value) + trans.Flush() + s := trans.String() + v, e := p.ReadI32() + + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != 0 { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + trans.Reset() + trans.Close() +} + +func TestWriteSimpleJSONProtocolI64(t *testing.T) { + thetype := "int64" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range INT64_VALUES { + if e := p.WriteI64(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := 
trans.String() + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := int64(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolI64(t *testing.T) { + thetype := "int64" + for _, value := range INT64_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(strconv.FormatInt(value, 10)) + trans.Flush() + s := trans.String() + v, e := p.ReadI64() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + trans.Reset() + trans.Close() + } +} + +func TestReadSimpleJSONProtocolI64Null(t *testing.T) { + thetype := "int32" + value := "null" + + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(value) + trans.Flush() + s := trans.String() + v, e := p.ReadI64() + + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != 0 { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + trans.Reset() + trans.Close() +} + +func TestWriteSimpleJSONProtocolDouble(t *testing.T) { + thetype := "double" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range DOUBLE_VALUES { + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if math.IsInf(value, 1) { + if s != jsonQuote(JSON_INFINITY) { + t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_INFINITY)) + } + } else if math.IsInf(value, -1) { + if s != jsonQuote(JSON_NEGATIVE_INFINITY) { + t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_NEGATIVE_INFINITY)) + } + } else if math.IsNaN(value) { + if s != jsonQuote(JSON_NAN) { + t.Fatalf("Bad value for %s %v, wrote: %v, expected: %v", thetype, value, s, jsonQuote(JSON_NAN)) + } + } else { + if s != fmt.Sprint(value) { + t.Fatalf("Bad value for %s %v: %s", thetype, value, s) + } + v := float64(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolDouble(t *testing.T) { + thetype := "double" + for _, value := range DOUBLE_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + n := NewNumericFromDouble(value) + trans.WriteString(n.String()) + trans.Flush() + s := trans.String() + v, e := p.ReadDouble() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if math.IsInf(value, 1) { + if !math.IsInf(v, 1) { + t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v) + } + } else if math.IsInf(value, -1) { + if !math.IsInf(v, -1) { + t.Fatalf("Bad value for %s %v, 
wrote: %v, received: %v", thetype, value, s, v) + } + } else if math.IsNaN(value) { + if !math.IsNaN(v) { + t.Fatalf("Bad value for %s %v, wrote: %v, received: %v", thetype, value, s, v) + } + } else { + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + } + trans.Reset() + trans.Close() + } +} + +func TestWriteSimpleJSONProtocolString(t *testing.T) { + thetype := "string" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + for _, value := range STRING_VALUES { + if e := p.WriteString(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s[0] != '"' || s[len(s)-1] != '"' { + t.Fatalf("Bad value for %s '%v', wrote '%v', expected: %v", thetype, value, s, fmt.Sprint("\"", value, "\"")) + } + v := new(string) + if err := json.Unmarshal([]byte(s), v); err != nil || *v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v) + } + trans.Reset() + } + trans.Close() +} + +func TestReadSimpleJSONProtocolString(t *testing.T) { + thetype := "string" + for _, value := range STRING_VALUES { + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(jsonQuote(value)) + trans.Flush() + s := trans.String() + v, e := p.ReadString() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != value { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + v1 := new(string) + if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v1) + } + trans.Reset() + trans.Close() + } +} +func TestReadSimpleJSONProtocolStringNull(t *testing.T) { + thetype := "string" + value := "null" + + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(value) + trans.Flush() + s := trans.String() + v, e := p.ReadString() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != "" { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + trans.Reset() + trans.Close() +} + +func TestWriteSimpleJSONProtocolBinary(t *testing.T) { + thetype := "binary" + value := protocol_bdata + b64value := make([]byte, base64.StdEncoding.EncodedLen(len(protocol_bdata))) + base64.StdEncoding.Encode(b64value, value) + b64String := string(b64value) + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + if e := p.WriteBinary(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s value %v due to error flushing: %s", thetype, value, e.Error()) + } + s := trans.String() + if s != fmt.Sprint("\"", b64String, "\"") { + t.Fatalf("Bad value for %s %v\n wrote: %v\nexpected: %v", thetype, value, s, "\""+b64String+"\"") + } + v1 := new(string) + if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != b64String { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', 
expected: '%v'", thetype, value, s, *v1) + } + trans.Close() +} + +func TestReadSimpleJSONProtocolBinary(t *testing.T) { + thetype := "binary" + value := protocol_bdata + b64value := make([]byte, base64.StdEncoding.EncodedLen(len(protocol_bdata))) + base64.StdEncoding.Encode(b64value, value) + b64String := string(b64value) + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(jsonQuote(b64String)) + trans.Flush() + s := trans.String() + v, e := p.ReadBinary() + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if len(v) != len(value) { + t.Fatalf("Bad value for %s value length %v, wrote: %v, received length: %v", thetype, len(value), s, len(v)) + } + for i := 0; i < len(v); i++ { + if v[i] != value[i] { + t.Fatalf("Bad value for %s at index %d value %v, wrote: %v, received: %v", thetype, i, value[i], s, v[i]) + } + } + v1 := new(string) + if err := json.Unmarshal([]byte(s), v1); err != nil || *v1 != b64String { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, *v1) + } + trans.Reset() + trans.Close() +} + +func TestReadSimpleJSONProtocolBinaryNull(t *testing.T) { + thetype := "binary" + value := "null" + + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + trans.WriteString(value) + trans.Flush() + s := trans.String() + b, e := p.ReadBinary() + v := string(b) + + if e != nil { + t.Fatalf("Unable to read %s value %v due to error: %s", thetype, value, e.Error()) + } + if v != "" { + t.Fatalf("Bad value for %s value %v, wrote: %v, received: %v", thetype, value, s, v) + } + trans.Reset() + trans.Close() +} + +func TestWriteSimpleJSONProtocolList(t *testing.T) { + thetype := "list" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + p.WriteListBegin(TType(DOUBLE), len(DOUBLE_VALUES)) + for _, value := range DOUBLE_VALUES { + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + } + p.WriteListEnd() + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) + } + str := trans.String() + str1 := new([]interface{}) + err := json.Unmarshal([]byte(str), str1) + if err != nil { + t.Fatalf("Unable to decode %s, wrote: %s", thetype, str) + } + l := *str1 + if len(l) < 2 { + t.Fatalf("List must be at least of length two to include metadata") + } + if int(l[0].(float64)) != DOUBLE { + t.Fatal("Invalid type for list, expected: ", DOUBLE, ", but was: ", l[0]) + } + if int(l[1].(float64)) != len(DOUBLE_VALUES) { + t.Fatal("Invalid length for list, expected: ", len(DOUBLE_VALUES), ", but was: ", l[1]) + } + for k, value := range DOUBLE_VALUES { + s := l[k+2] + if math.IsInf(value, 1) { + if s.(string) != JSON_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_INFINITY), str) + } + } else if math.IsInf(value, 0) { + if s.(string) != JSON_NEGATIVE_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY), str) + } + } else if math.IsNaN(value) { + if s.(string) != JSON_NAN { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NAN), str) + } + } else { + if s.(float64) != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s'", thetype, value, 
s) + } + } + trans.Reset() + } + trans.Close() +} + +func TestWriteSimpleJSONProtocolSet(t *testing.T) { + thetype := "set" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + p.WriteSetBegin(TType(DOUBLE), len(DOUBLE_VALUES)) + for _, value := range DOUBLE_VALUES { + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value %v due to error: %s", thetype, value, e.Error()) + } + } + p.WriteSetEnd() + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) + } + str := trans.String() + str1 := new([]interface{}) + err := json.Unmarshal([]byte(str), str1) + if err != nil { + t.Fatalf("Unable to decode %s, wrote: %s", thetype, str) + } + l := *str1 + if len(l) < 2 { + t.Fatalf("Set must be at least of length two to include metadata") + } + if int(l[0].(float64)) != DOUBLE { + t.Fatal("Invalid type for set, expected: ", DOUBLE, ", but was: ", l[0]) + } + if int(l[1].(float64)) != len(DOUBLE_VALUES) { + t.Fatal("Invalid length for set, expected: ", len(DOUBLE_VALUES), ", but was: ", l[1]) + } + for k, value := range DOUBLE_VALUES { + s := l[k+2] + if math.IsInf(value, 1) { + if s.(string) != JSON_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_INFINITY), str) + } + } else if math.IsInf(value, 0) { + if s.(string) != JSON_NEGATIVE_INFINITY { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY), str) + } + } else if math.IsNaN(value) { + if s.(string) != JSON_NAN { + t.Fatalf("Bad value for %s at index %v %v, wrote: %q, expected: %q, originally wrote: %q", thetype, k, value, s, jsonQuote(JSON_NAN), str) + } + } else { + if s.(float64) != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s'", thetype, value, s) + } + } + trans.Reset() + } + trans.Close() +} + +func TestWriteSimpleJSONProtocolMap(t *testing.T) { + thetype := "map" + trans := NewTMemoryBuffer() + p := NewTSimpleJSONProtocol(trans) + p.WriteMapBegin(TType(I32), TType(DOUBLE), len(DOUBLE_VALUES)) + for k, value := range DOUBLE_VALUES { + if e := p.WriteI32(int32(k)); e != nil { + t.Fatalf("Unable to write %s key int32 value %v due to error: %s", thetype, k, e.Error()) + } + if e := p.WriteDouble(value); e != nil { + t.Fatalf("Unable to write %s value float64 value %v due to error: %s", thetype, value, e.Error()) + } + } + p.WriteMapEnd() + if e := p.Flush(); e != nil { + t.Fatalf("Unable to write %s due to error flushing: %s", thetype, e.Error()) + } + str := trans.String() + if str[0] != '[' || str[len(str)-1] != ']' { + t.Fatalf("Bad value for %s, wrote: %q, in go: %q", thetype, str, DOUBLE_VALUES) + } + l := strings.Split(str[1:len(str)-1], ",") + if len(l) < 3 { + t.Fatal("Expected list of at least length 3 for map for metadata, but was of length ", len(l)) + } + expectedKeyType, _ := strconv.Atoi(l[0]) + expectedValueType, _ := strconv.Atoi(l[1]) + expectedSize, _ := strconv.Atoi(l[2]) + if expectedKeyType != I32 { + t.Fatal("Expected map key type ", I32, ", but was ", l[0]) + } + if expectedValueType != DOUBLE { + t.Fatal("Expected map value type ", DOUBLE, ", but was ", l[1]) + } + if expectedSize != len(DOUBLE_VALUES) { + t.Fatal("Expected map size of ", len(DOUBLE_VALUES), ", but was ", l[2]) + } + for k, value := range DOUBLE_VALUES { + strk := l[k*2+3] + strv := l[k*2+4] + ik, err := strconv.Atoi(strk) + if err != nil { + t.Fatalf("Bad 
value for %s index %v, wrote: %v, expected: %v, error: %s", thetype, k, strk, string(k), err.Error()) + } + if ik != k { + t.Fatalf("Bad value for %s index %v, wrote: %v, expected: %v", thetype, k, strk, k) + } + s := strv + if math.IsInf(value, 1) { + if s != jsonQuote(JSON_INFINITY) { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_INFINITY)) + } + } else if math.IsInf(value, 0) { + if s != jsonQuote(JSON_NEGATIVE_INFINITY) { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_NEGATIVE_INFINITY)) + } + } else if math.IsNaN(value) { + if s != jsonQuote(JSON_NAN) { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected: %v", thetype, k, value, s, jsonQuote(JSON_NAN)) + } + } else { + expected := strconv.FormatFloat(value, 'g', 10, 64) + if s != expected { + t.Fatalf("Bad value for %s at index %v %v, wrote: %v, expected %v", thetype, k, value, s, expected) + } + v := float64(0) + if err := json.Unmarshal([]byte(s), &v); err != nil || v != value { + t.Fatalf("Bad json-decoded value for %s %v, wrote: '%s', expected: '%v'", thetype, value, s, v) + } + } + trans.Reset() + } + trans.Close() +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception_test.go new file mode 100644 index 0000000000..b44314f490 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception_test.go @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "fmt" + "io" + + "testing" +) + +type timeout struct{ timedout bool } + +func (t *timeout) Timeout() bool { + return t.timedout +} + +func (t *timeout) Error() string { + return fmt.Sprintf("Timeout: %v", t.timedout) +} + +func TestTExceptionTimeout(t *testing.T) { + timeout := &timeout{true} + exception := NewTTransportExceptionFromError(timeout) + if timeout.Error() != exception.Error() { + t.Fatalf("Error did not match: expected %q, got %q", timeout.Error(), exception.Error()) + } + + if exception.TypeId() != TIMED_OUT { + t.Fatalf("TypeId was not TIMED_OUT: expected %v, got %v", TIMED_OUT, exception.TypeId()) + } +} + +func TestTExceptionEOF(t *testing.T) { + exception := NewTTransportExceptionFromError(io.EOF) + if io.EOF.Error() != exception.Error() { + t.Fatalf("Error did not match: expected %q, got %q", io.EOF.Error(), exception.Error()) + } + + if exception.TypeId() != END_OF_FILE { + t.Fatalf("TypeId was not END_OF_FILE: expected %v, got %v", END_OF_FILE, exception.TypeId()) + } +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_test.go new file mode 100644 index 0000000000..864958a9da --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_test.go @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "io" + "net" + "strconv" + "testing" +) + +const TRANSPORT_BINARY_DATA_SIZE = 4096 + +var ( + transport_bdata []byte // test data for writing; same as data + transport_header map[string]string +) + +func init() { + transport_bdata = make([]byte, TRANSPORT_BINARY_DATA_SIZE) + for i := 0; i < TRANSPORT_BINARY_DATA_SIZE; i++ { + transport_bdata[i] = byte((i + 'a') % 255) + } + transport_header = map[string]string{"key": "User-Agent", + "value": "Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36"} +} + +func TransportTest(t *testing.T, writeTrans TTransport, readTrans TTransport) { + buf := make([]byte, TRANSPORT_BINARY_DATA_SIZE) + if !writeTrans.IsOpen() { + t.Fatalf("Transport %T not open: %s", writeTrans, writeTrans) + } + if !readTrans.IsOpen() { + t.Fatalf("Transport %T not open: %s", readTrans, readTrans) + } + _, err := writeTrans.Write(transport_bdata) + if err != nil { + t.Fatalf("Transport %T cannot write binary data of length %d: %s", writeTrans, len(transport_bdata), err) + } + err = writeTrans.Flush() + if err != nil { + t.Fatalf("Transport %T cannot flush write of binary data: %s", writeTrans, err) + } + n, err := io.ReadFull(readTrans, buf) + if err != nil { + t.Errorf("Transport %T cannot read binary data of length %d: %s", readTrans, TRANSPORT_BINARY_DATA_SIZE, err) + } + if n != TRANSPORT_BINARY_DATA_SIZE { + t.Errorf("Transport %T read only %d instead of %d bytes of binary data", readTrans, n, TRANSPORT_BINARY_DATA_SIZE) + } + for k, v := range buf { + if v != transport_bdata[k] { + t.Fatalf("Transport %T read %d instead of %d for index %d of binary data 2", readTrans, v, transport_bdata[k], k) + } + } + _, err = writeTrans.Write(transport_bdata) + if err != nil { + t.Fatalf("Transport %T cannot write binary data 2 of length %d: %s", writeTrans, len(transport_bdata), err) + } + err = writeTrans.Flush() + if err != nil { + t.Fatalf("Transport %T cannot flush write binary data 2: %s", writeTrans, err) + } + buf = make([]byte, TRANSPORT_BINARY_DATA_SIZE) + read := 1 + for n = 0; n < TRANSPORT_BINARY_DATA_SIZE && read != 0; { + read, err = readTrans.Read(buf[n:]) + if err != nil { + t.Errorf("Transport %T cannot read binary data 2 of total length %d from offset %d: %s", readTrans, TRANSPORT_BINARY_DATA_SIZE, n, err) + } + n += read + } + if n != TRANSPORT_BINARY_DATA_SIZE { + t.Errorf("Transport %T read only %d instead of %d bytes of binary data 2", readTrans, n, TRANSPORT_BINARY_DATA_SIZE) + } + for k, v := range buf { + if v != transport_bdata[k] { + t.Fatalf("Transport %T read %d instead of %d for index %d of binary data 2", readTrans, v, transport_bdata[k], k) + } + } +} + +func TransportHeaderTest(t *testing.T, writeTrans TTransport, readTrans TTransport) { + buf := make([]byte, TRANSPORT_BINARY_DATA_SIZE) + if !writeTrans.IsOpen() { + t.Fatalf("Transport %T not open: %s", writeTrans, writeTrans) + } + if !readTrans.IsOpen() { + t.Fatalf("Transport %T not open: %s", readTrans, readTrans) + } + // Need to assert type of TTransport to THttpClient to expose the Setter + httpWPostTrans := writeTrans.(*THttpClient) + httpWPostTrans.SetHeader(transport_header["key"], transport_header["value"]) + + _, err := writeTrans.Write(transport_bdata) + if err != nil { + t.Fatalf("Transport %T cannot write binary data of length %d: %s", writeTrans, len(transport_bdata), err) + } + err = writeTrans.Flush() + if err != nil { + t.Fatalf("Transport %T cannot flush write of binary data: %s", 
writeTrans, err) + } + // Need to assert type of TTransport to THttpClient to expose the Getter + httpRPostTrans := readTrans.(*THttpClient) + readHeader := httpRPostTrans.GetHeader(transport_header["key"]) + if err != nil { + t.Errorf("Transport %T cannot read HTTP Header Value", httpRPostTrans) + } + + if transport_header["value"] != readHeader { + t.Errorf("Expected HTTP Header Value %s, got %s", transport_header["value"], readHeader) + } + n, err := io.ReadFull(readTrans, buf) + if err != nil { + t.Errorf("Transport %T cannot read binary data of length %d: %s", readTrans, TRANSPORT_BINARY_DATA_SIZE, err) + } + if n != TRANSPORT_BINARY_DATA_SIZE { + t.Errorf("Transport %T read only %d instead of %d bytes of binary data", readTrans, n, TRANSPORT_BINARY_DATA_SIZE) + } + for k, v := range buf { + if v != transport_bdata[k] { + t.Fatalf("Transport %T read %d instead of %d for index %d of binary data 2", readTrans, v, transport_bdata[k], k) + } + } +} + +func CloseTransports(t *testing.T, readTrans TTransport, writeTrans TTransport) { + err := readTrans.Close() + if err != nil { + t.Errorf("Transport %T cannot close read transport: %s", readTrans, err) + } + if writeTrans != readTrans { + err = writeTrans.Close() + if err != nil { + t.Errorf("Transport %T cannot close write transport: %s", writeTrans, err) + } + } +} + +func FindAvailableTCPServerPort(startPort int) (net.Addr, error) { + for i := startPort; i < 65535; i++ { + s := "127.0.0.1:" + strconv.Itoa(i) + l, err := net.Listen("tcp", s) + if err == nil { + l.Close() + return net.ResolveTCPAddr("tcp", s) + } + } + return nil, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Could not find available server port") +} + +func valueInSlice(value string, slice []string) bool { + for _, v := range slice { + if value == v { + return true + } + } + return false +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport_test.go b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport_test.go new file mode 100644 index 0000000000..f57610cbd2 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport_test.go @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "compress/zlib" + "testing" +) + +func TestZlibTransport(t *testing.T) { + trans, err := NewTZlibTransport(NewTMemoryBuffer(), zlib.BestCompression) + if err != nil { + t.Fatal(err) + } + TransportTest(t, trans, trans) +} diff --git a/vendor/github.com/apache/thrift/package.json b/vendor/github.com/apache/thrift/package.json new file mode 100644 index 0000000000..edfc3553ed --- /dev/null +++ b/vendor/github.com/apache/thrift/package.json @@ -0,0 +1,54 @@ +{ + "name": "thrift", + "description": "node.js bindings for the Apache Thrift RPC system", + "homepage": "http://thrift.apache.org/", + "repository": { + "type": "git", + "url": "https://git-wip-us.apache.org/repos/asf/thrift.git" + }, + "version": "0.10.0", + "author": { + "name": "Apache Thrift Developers", + "email": "dev@thrift.apache.org", + "url": "http://thrift.apache.org" + }, + "license": "Apache-2.0", + "licenses": [ + { + "type": "Apache-2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0" + } + ], + "bugs": { + "mail": "dev@thrift.apache.org", + "url": "https://issues.apache.org/jira/browse/THRIFT" + }, + "files": [ + "lib/nodejs/lib/thrift", + "lib/nodejs/README.md" + ], + "directories": { + "lib": "./lib/nodejs/lib/thrift" + }, + "main": "./lib/nodejs/lib/thrift", + "engines": { + "node": ">= 0.2.4" + }, + "dependencies": { + "node-int64": "~0.3.0", + "q": "1.0.x", + "ws": "~0.4.32" + }, + "devDependencies": { + "buffer-equals": "^1.0.3", + "commander": "2.1.x", + "connect": "2.7.x", + "istanbul": "^0.3.5", + "run-browser": "^2.0.1", + "tape": "~3.5.0" + }, + "scripts": { + "cover": "lib/nodejs/test/testAll.sh COVER", + "test": "lib/nodejs/test/testAll.sh" + } +} diff --git a/vendor/github.com/apache/thrift/sonar-project.properties b/vendor/github.com/apache/thrift/sonar-project.properties new file mode 100755 index 0000000000..6e6c5db90d --- /dev/null +++ b/vendor/github.com/apache/thrift/sonar-project.properties @@ -0,0 +1,140 @@ +# Apache Thrift © The Apache Software Foundation +# http://www.apache.org/licenses/LICENSE-2.0 +# SPDX-License-Identifier: Apache-2.0 + +# File: sonar-project.properties +# Apache Thrift configuration file for Sonar https://analysis.apache.org/ +# Sonar is an open platform to manage code quality http://www.sonarsource.org/ + + +# required metadata +sonar.projectKey=org.apache.thrift +sonar.projectName=Apache Thrift +sonar.projectDescription= +The Apache Thrift software framework, for scalable cross-language services +development, combines a software stack with a code generation engine to build +services that work efficiently and seamlessly between all major languages. + +# Apache Thrift Version +sonar.projectVersion=0.10.0 +# use this to set another version string +# $ sonar-runner -D sonar.projectVersion=`git rev-parse HEAD` +# set projectDate in combination with projectVersion for imports of old releases +#sonar.projectDate=yyyy-MM-dd + +# TODO add website (sonar.projectUrl does not work) +#sonar.XXXX=http//thrift.apache.org + +# Some properties that will be inherited by the modules +sonar.sources=src +sonar.language=java,js,c++,py,c +sonar.sourceEncoding=UTF-8 + +# scm +sonar.scm.url=scm:git:https://git-wip-us.apache.org/repos/asf/thrift + +# cppcheck -q --error-exitcode=0 --xml . 
2> cppcheck-result.xml +sonar.cxx.cppcheck.reportPath=cppcheck-result.xml + +# List of the module identifiers +sonar.modules=module1,module3,module4,module5,module6,module7,module8,module9,module10,module11,module12,module14 + + + +# we need sonar-runner 2.1 for this, see http://jira.codehaus.org/browse/SONARPLUGINS-2421 +#sonar.modules=module2 + +# delph plugin is broken +#sonar.modules=module13 + +# phpunit plugin is broken +#sonar.modules=module15 + +module1.sonar.projectName=Apache Thrift - Java Library +module1.sonar.projectBaseDir=lib/java +module1.sonar.sources=src +module1.sonar.tests=test +module1.sonar.binaries=build/libthrift-0.10.0.jar +module1.sonar.libraries=build/lib/*.jar +module1.sonar.language=java + +module2.sonar.projectName=Apache Thrift - Java Tutorial +module2.sonar.projectBaseDir=. +module2.sonar.sources=tutorial/java/src, tutorial/java/gen-java +module2.sonar.binaries=tutorial/java/tutorial.jar +module2.sonar.libraries=lib/java/build/lib/*.jar,lib/java/build/libthrift-1.0.0.jar +module2.sonar.language=java + +module3.sonar.projectName=Apache Thrift - JavaScript Library +module3.sonar.projectBaseDir=lib/js +module3.sonar.sources=. +module3.sonar.exclusions=test/**/* +module3.sonar.language=js + +module4.sonar.projectName=Apache Thrift - JavaScript Tutorial +module4.sonar.projectBaseDir=tutorial/js +module4.sonar.sources=. +module4.sonar.language=web + +module5.sonar.projectName=Apache Thrift - C++ Library +module5.sonar.projectBaseDir=lib/cpp +module5.sonar.sources=src +module5.sonar.tests=test +module5.sonar.language=c++ + +module6.sonar.projectName=Apache Thrift - C++ Tutorial +module6.sonar.projectBaseDir=tutorial/cpp +module6.sonar.sources=. +module6.sonar.exclusions=gen-cpp/**/* +module6.sonar.language=c++ + +module7.sonar.projectName=Apache Thrift - C++ Cross Language Test +module7.sonar.projectBaseDir=test/cpp +module7.sonar.sources=src +module7.sonar.language=c++ + +module8.sonar.projectName=Apache Thrift - Compiler +module8.sonar.projectBaseDir=compiler/cpp +module8.sonar.sources=src +module8.sonar.language=c++ + +module9.sonar.projectName=Apache Thrift - Python Library +module9.sonar.projectBaseDir=lib/py +module9.sonar.sources=src +module9.sonar.language=py + +module10.sonar.projectName=Apache Thrift - Python Tutorial +module10.sonar.projectBaseDir=tutorial/py +module10.sonar.sources=. +module10.sonar.exclusions=gen-py/**/* +module10.sonar.language=py + +module11.sonar.projectName=Apache Thrift - Python Cross Language Test +module11.sonar.projectBaseDir=test/py +module11.sonar.sources=. 
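+# (note: the gen-*/**/* exclusion below keeps generated Thrift code out of the scan, so Sonar only sees the hand-written Python test sources)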
+module11.sonar.exclusions=gen-*/**/* +module11.sonar.language=py + +module12.sonar.projectName=Apache Thrift - c_glib Library +module12.sonar.projectBaseDir=lib/c_glib +module12.sonar.sources=src +module12.sonar.language=c + +module13.sonar.projectName=Apache Thrift - Delphi Library +module13.sonar.projectBaseDir=lib/delphi +module13.sonar.sources=src +module13.sonar.tests=test +module13.sonar.language=delph + +module14.sonar.projectName=Apache Thrift - Flex (as3) Library +module14.sonar.projectBaseDir=lib/as3 +module14.sonar.sources=src +module14.sonar.language=flex + +module15.sonar.projectName=Apache Thrift - PHP Library +module15.sonar.projectBaseDir=lib/php +module15.sonar.sources=src +module15.sonar.language=php + +# TODO add some more languages here + diff --git a/vendor/github.com/araddon/gou/.gitignore b/vendor/github.com/araddon/gou/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/araddon/gou/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/araddon/gou/coerce_test.go b/vendor/github.com/araddon/gou/coerce_test.go new file mode 100644 index 0000000000..58922fbbc5 --- /dev/null +++ b/vendor/github.com/araddon/gou/coerce_test.go @@ -0,0 +1,25 @@ +package gou + +import ( + . "github.com/araddon/gou/goutest" + "testing" +) + +func TestCoerce(t *testing.T) { + + data := map[string]interface{}{ + "int": 4, + "float": 45.3, + "string": "22", + "stringf": "22.2", + } + Assert(CoerceStringShort(data["int"]) == "4", t, "get int as string") + Assert(CoerceStringShort(data["float"]) == "45.3", t, "get float as string: %v", data["float"]) + Assert(CoerceStringShort(data["string"]) == "22", t, "get string as string: %v", data["string"]) + Assert(CoerceStringShort(data["stringf"]) == "22.2", t, "get stringf as string: %v", data["stringf"]) + + Assert(CoerceIntShort(data["int"]) == 4, t, "get int as int: %v", data["int"]) + Assert(CoerceIntShort(data["float"]) == 45, t, "get float as int: %v", data["float"]) + Assert(CoerceIntShort(data["string"]) == 22, t, "get string as int: %v", data["string"]) + Assert(CoerceIntShort(data["stringf"]) == 22, t, "get stringf as int: %v", data["stringf"]) +} diff --git a/vendor/github.com/araddon/gou/jsonhelper_test.go b/vendor/github.com/araddon/gou/jsonhelper_test.go new file mode 100644 index 0000000000..4e4703a1a4 --- /dev/null +++ b/vendor/github.com/araddon/gou/jsonhelper_test.go @@ -0,0 +1,179 @@ +package gou + +import ( + "bytes" + "encoding/json" + . 
"github.com/araddon/gou/goutest" + "github.com/bmizerany/assert" + "log" + "os" + "strings" + "testing" +) + +// go test -bench=".*" +// go test -run="(Util)" + +var ( + jh JsonHelper +) + +func init() { + SetLogger(log.New(os.Stderr, "", log.Ltime|log.Lshortfile), "debug") + // create test data + json.Unmarshal([]byte(`{ + "name":"aaron", + "nullstring":null, + "ints":[1,2,3,4], + "int":1, + "intstr":"1", + "int64":1234567890, + "float64":123.456, + "float64str":"123.456", + "MaxSize" : 1048576, + "strings":["string1"], + "stringscsv":"string1,string2", + "nested":{ + "nest":"string2", + "strings":["string1"], + "int":2, + "list":["value"], + "nest2":{ + "test":"good" + } + }, + "nested2":[ + {"sub":2} + ], + "period.name":"value" + }`), &jh) +} + +func TestJsonRawWriter(t *testing.T) { + var buf bytes.Buffer + buf.WriteString(`"hello"`) + raw := json.RawMessage(buf.Bytes()) + bya, _ := json.Marshal(&buf) + Debug(string(bya)) + bya, _ = json.Marshal(&raw) + Debug(string(bya)) + + /* + bya, err := json.Marshal(buf) + Assert(string(bya) == `"hello"`, t, "Should be hello but was %s", string(bya)) + Debug(string(buf.Bytes()), err) + var jrw JsonRawWriter + jrw.WriteString(`"hello"`) + Debug(jrw.Raw()) + bya, err = json.Marshal(jrw.Raw()) + Assert(string(bya) == `"hello"`, t, "Should be hello but was %s", string(bya)) + Debug(string(jrw.Bytes()), err) + */ +} + +func TestJsonHelper(t *testing.T) { + + Assert(jh.String("name") == "aaron", t, "should get 'aaron' %s", jh.String("name")) + Assert(jh.String("nullstring") == "", t, "should get '' %s", jh.String("nullstring")) + + Assert(jh.Int("int") == 1, t, "get int ") + Assert(jh.Int("ints[0]") == 1, t, "get int from array %d", jh.Int("ints[0]")) + Assert(jh.Int("ints[2]") == 3, t, "get int from array %d", jh.Int("ints[0]")) + Assert(len(jh.Ints("ints")) == 4, t, "get int array %v", jh.Ints("ints")) + Assert(jh.Int64("int64") == 1234567890, t, "get int") + Assert(jh.Int("nested.int") == 2, t, "get int") + Assert(jh.String("nested.nest") == "string2", t, "should get string %s", jh.String("nested.nest")) + Assert(jh.String("nested.nest2.test") == "good", t, "should get string %s", jh.String("nested.nest2.test")) + Assert(jh.String("nested.list[0]") == "value", t, "get string from array") + Assert(jh.Int("nested2[0].sub") == 2, t, "get int from obj in array %d", jh.Int("nested2[0].sub")) + + Assert(jh.Int("MaxSize") == 1048576, t, "get int, test capitalization? 
") + sl := jh.Strings("strings") + Assert(len(sl) == 1 && sl[0] == "string1", t, "get strings ") + sl = jh.Strings("stringscsv") + Assert(len(sl) == 2 && sl[0] == "string1", t, "get strings ") + + i64, ok := jh.Int64Safe("int64") + Assert(ok, t, "int64safe ok") + Assert(i64 == 1234567890, t, "int64safe value") + + u64, ok := jh.Uint64Safe("int64") + Assert(ok, t, "uint64safe ok") + Assert(u64 == 1234567890, t, "int64safe value") + _, ok = jh.Uint64Safe("notexistent") + assert.Tf(t, !ok, "should not be ok") + _, ok = jh.Uint64Safe("name") + assert.Tf(t, !ok, "should not be ok") + + i, ok := jh.IntSafe("int") + Assert(ok, t, "intsafe ok") + Assert(i == 1, t, "intsafe value") + + l := jh.List("nested2") + Assert(len(l) == 1, t, "get list") + + fv, ok := jh.Float64Safe("name") + assert.Tf(t, !ok, "floatsafe not ok") + fv, ok = jh.Float64Safe("float64") + assert.Tf(t, ok, "floatsafe ok") + assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) + fv, ok = jh.Float64Safe("float64str") + assert.Tf(t, ok, "floatsafe ok") + assert.Tf(t, CloseEnuf(fv, 123.456), "floatsafe value %v", fv) + + jhm := jh.Helpers("nested2") + Assert(len(jhm) == 1, t, "get list of helpers") + Assert(jhm[0].Int("sub") == 2, t, "Should get list of helpers") + +} + +func TestJsonInterface(t *testing.T) { + + var jim map[string]JsonInterface + err := json.Unmarshal([]byte(`{ + "nullstring":null, + "string":"string", + "int":22, + "float":22.2, + "floatstr":"22.2", + "intstr":"22" + }`), &jim) + Assert(err == nil, t, "no error:%v ", err) + Assert(jim["nullstring"].StringSh() == "", t, "nullstring: %v", jim["nullstring"]) + Assert(jim["string"].StringSh() == "string", t, "nullstring: %v", jim["string"]) + Assert(jim["int"].IntSh() == 22, t, "int: %v", jim["int"]) + Assert(jim["int"].StringSh() == "22", t, "int->string: %v", jim["int"]) + Assert(jim["int"].FloatSh() == float32(22), t, "int->float: %v", jim["int"]) + Assert(jim["float"].FloatSh() == 22.2, t, "float: %v", jim["float"]) + Assert(jim["float"].StringSh() == "22.2", t, "float->string: %v", jim["float"]) + Assert(jim["float"].IntSh() == 22, t, "float->int: %v", jim["float"]) + Assert(jim["intstr"].IntSh() == 22, t, "intstr: %v", jim["intstr"]) + Assert(jim["intstr"].FloatSh() == float32(22), t, "intstr->float: %v", jim["intstr"]) +} + +func TestJsonCoercion(t *testing.T) { + + Assert(jh.Int("intstr") == 1, t, "get string as int %s", jh.String("intstr")) + Assert(jh.String("int") == "1", t, "get int as string %s", jh.String("int")) + Assert(jh.Int("notint") == -1, t, "get non existent int = 0??? ") + +} + +func TestJsonPathNotation(t *testing.T) { + + // Now lets test xpath type syntax + Assert(jh.Int("/MaxSize") == 1048576, t, "get int, test capitalization? 
") + Assert(jh.String("/nested/nest") == "string2", t, "should get string %s", jh.String("/nested/nest")) + Assert(jh.String("/nested/list[0]") == "value", t, "get string from array") + // note this one has period in name + Assert(jh.String("/period.name") == "value", t, "test period in name ") + +} + +func TestFromReader(t *testing.T) { + raw := `{"testing": 123}` + reader := strings.NewReader(raw) + jh, err := NewJsonHelperReader(reader) + Assert(err == nil, t, "Unexpected error decoding json: %s", err) + Assert(jh.Int("testing") == 123, t, "Unexpected value in json: %d", jh.Int("testing")) +} diff --git a/vendor/github.com/araddon/gou/uid_test.go b/vendor/github.com/araddon/gou/uid_test.go new file mode 100644 index 0000000000..7896e7c747 --- /dev/null +++ b/vendor/github.com/araddon/gou/uid_test.go @@ -0,0 +1,11 @@ +package gou + +import ( + "testing" +) + +func TestUid(t *testing.T) { + u := NewUid() + Debug(u) + Debug(NewUid()) +} diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore new file mode 100755 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/armon/go-metrics/inmem_signal_test.go b/vendor/github.com/armon/go-metrics/inmem_signal_test.go new file mode 100644 index 0000000000..9bbca5f254 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_signal_test.go @@ -0,0 +1,46 @@ +package metrics + +import ( + "bytes" + "os" + "strings" + "syscall" + "testing" + "time" +) + +func TestInmemSignal(t *testing.T) { + buf := bytes.NewBuffer(nil) + inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) + sig := NewInmemSignal(inm, syscall.SIGUSR1, buf) + defer sig.Stop() + + inm.SetGauge([]string{"foo"}, 42) + inm.EmitKey([]string{"bar"}, 42) + inm.IncrCounter([]string{"baz"}, 42) + inm.AddSample([]string{"wow"}, 42) + + // Wait for period to end + time.Sleep(15 * time.Millisecond) + + // Send signal! 
+ syscall.Kill(os.Getpid(), syscall.SIGUSR1) + + // Wait for flush + time.Sleep(10 * time.Millisecond) + + // Check the output + out := string(buf.Bytes()) + if !strings.Contains(out, "[G] 'foo': 42") { + t.Fatalf("bad: %v", out) + } + if !strings.Contains(out, "[P] 'bar': 42") { + t.Fatalf("bad: %v", out) + } + if !strings.Contains(out, "[C] 'baz': Count: 1 Sum: 42") { + t.Fatalf("bad: %v", out) + } + if !strings.Contains(out, "[S] 'wow': Count: 1 Sum: 42") { + t.Fatalf("bad: %v", out) + } +} diff --git a/vendor/github.com/armon/go-metrics/inmem_test.go b/vendor/github.com/armon/go-metrics/inmem_test.go new file mode 100644 index 0000000000..228a2fc1af --- /dev/null +++ b/vendor/github.com/armon/go-metrics/inmem_test.go @@ -0,0 +1,104 @@ +package metrics + +import ( + "math" + "testing" + "time" +) + +func TestInmemSink(t *testing.T) { + inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond) + + data := inm.Data() + if len(data) != 1 { + t.Fatalf("bad: %v", data) + } + + // Add data points + inm.SetGauge([]string{"foo", "bar"}, 42) + inm.EmitKey([]string{"foo", "bar"}, 42) + inm.IncrCounter([]string{"foo", "bar"}, 20) + inm.IncrCounter([]string{"foo", "bar"}, 22) + inm.AddSample([]string{"foo", "bar"}, 20) + inm.AddSample([]string{"foo", "bar"}, 22) + + data = inm.Data() + if len(data) != 1 { + t.Fatalf("bad: %v", data) + } + + intvM := data[0] + intvM.RLock() + + if time.Now().Sub(intvM.Interval) > 10*time.Millisecond { + t.Fatalf("interval too old") + } + if intvM.Gauges["foo.bar"] != 42 { + t.Fatalf("bad val: %v", intvM.Gauges) + } + if intvM.Points["foo.bar"][0] != 42 { + t.Fatalf("bad val: %v", intvM.Points) + } + + agg := intvM.Counters["foo.bar"] + if agg.Count != 2 { + t.Fatalf("bad val: %v", agg) + } + if agg.Sum != 42 { + t.Fatalf("bad val: %v", agg) + } + if agg.SumSq != 884 { + t.Fatalf("bad val: %v", agg) + } + if agg.Min != 20 { + t.Fatalf("bad val: %v", agg) + } + if agg.Max != 22 { + t.Fatalf("bad val: %v", agg) + } + if agg.Mean() != 21 { + t.Fatalf("bad val: %v", agg) + } + if agg.Stddev() != math.Sqrt(2) { + t.Fatalf("bad val: %v", agg) + } + + if agg.LastUpdated.IsZero() { + t.Fatalf("agg.LastUpdated is not set: %v", agg) + } + + diff := time.Now().Sub(agg.LastUpdated).Seconds() + if diff > 1 { + t.Fatalf("time diff too great: %f", diff) + } + + if agg = intvM.Samples["foo.bar"]; agg == nil { + t.Fatalf("missing sample") + } + + intvM.RUnlock() + + for i := 1; i < 10; i++ { + time.Sleep(10 * time.Millisecond) + inm.SetGauge([]string{"foo", "bar"}, 42) + data = inm.Data() + if len(data) != min(i+1, 5) { + t.Fatalf("bad: %v", data) + } + } + + // Should not exceed 5 intervals! 
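+ // (the sink was built as NewInmemSink(10ms interval, 50ms retention), so it keeps at most 50/10 = 5 intervals and evicts older ones)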
+ time.Sleep(10 * time.Millisecond) + inm.SetGauge([]string{"foo", "bar"}, 42) + data = inm.Data() + if len(data) != 5 { + t.Fatalf("bad: %v", data) + } +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/armon/go-metrics/metrics_test.go b/vendor/github.com/armon/go-metrics/metrics_test.go new file mode 100644 index 0000000000..f5b2a4c790 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/metrics_test.go @@ -0,0 +1,262 @@ +package metrics + +import ( + "reflect" + "runtime" + "testing" + "time" +) + +func mockMetric() (*MockSink, *Metrics) { + m := &MockSink{} + met := &Metrics{sink: m} + return m, met +} + +func TestMetrics_SetGauge(t *testing.T) { + m, met := mockMetric() + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.HostName = "test" + met.EnableHostname = true + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "test" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "gauge" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.SetGauge([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_EmitKey(t *testing.T) { + m, met := mockMetric() + met.EmitKey([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.EmitKey([]string{"key"}, float32(1)) + if m.keys[0][0] != "kv" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.EmitKey([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_IncrCounter(t *testing.T) { + m, met := mockMetric() + met.IncrCounter([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.IncrCounter([]string{"key"}, float32(1)) + if m.keys[0][0] != "counter" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.IncrCounter([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_AddSample(t *testing.T) { + m, met := mockMetric() + met.AddSample([]string{"key"}, float32(1)) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.EnableTypePrefix = true + met.AddSample([]string{"key"}, float32(1)) + if m.keys[0][0] != "sample" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.ServiceName = "service" + met.AddSample([]string{"key"}, float32(1)) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] != 1 { + t.Fatalf("") + } +} + +func TestMetrics_MeasureSince(t *testing.T) { + m, met := mockMetric() + 
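+ // (millisecond timer granularity keeps the measured durations well under the 0.1 threshold asserted below)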
met.TimerGranularity = time.Millisecond + n := time.Now() + met.MeasureSince([]string{"key"}, n) + if m.keys[0][0] != "key" { + t.Fatalf("") + } + if m.vals[0] > 0.1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.TimerGranularity = time.Millisecond + met.EnableTypePrefix = true + met.MeasureSince([]string{"key"}, n) + if m.keys[0][0] != "timer" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] > 0.1 { + t.Fatalf("") + } + + m, met = mockMetric() + met.TimerGranularity = time.Millisecond + met.ServiceName = "service" + met.MeasureSince([]string{"key"}, n) + if m.keys[0][0] != "service" || m.keys[0][1] != "key" { + t.Fatalf("") + } + if m.vals[0] > 0.1 { + t.Fatalf("") + } +} + +func TestMetrics_EmitRuntimeStats(t *testing.T) { + runtime.GC() + m, met := mockMetric() + met.emitRuntimeStats() + + if m.keys[0][0] != "runtime" || m.keys[0][1] != "num_goroutines" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[0] <= 1 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[1][0] != "runtime" || m.keys[1][1] != "alloc_bytes" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[1] <= 40000 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[2][0] != "runtime" || m.keys[2][1] != "sys_bytes" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[2] <= 100000 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[3][0] != "runtime" || m.keys[3][1] != "malloc_count" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[3] <= 100 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[4][0] != "runtime" || m.keys[4][1] != "free_count" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[4] <= 100 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[5][0] != "runtime" || m.keys[5][1] != "heap_objects" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[5] <= 100 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[6][0] != "runtime" || m.keys[6][1] != "total_gc_pause_ns" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[6] <= 100000 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[7][0] != "runtime" || m.keys[7][1] != "total_gc_runs" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[7] < 1 { + t.Fatalf("bad val: %v", m.vals) + } + + if m.keys[8][0] != "runtime" || m.keys[8][1] != "gc_pause_ns" { + t.Fatalf("bad key %v", m.keys) + } + if m.vals[8] <= 1000 { + t.Fatalf("bad val: %v", m.vals) + } +} + +func TestInsert(t *testing.T) { + k := []string{"hi", "bob"} + exp := []string{"hi", "there", "bob"} + out := insert(1, "there", k) + if !reflect.DeepEqual(exp, out) { + t.Fatalf("bad insert %v %v", exp, out) + } +} diff --git a/vendor/github.com/armon/go-metrics/sink_test.go b/vendor/github.com/armon/go-metrics/sink_test.go new file mode 100755 index 0000000000..15c5d771aa --- /dev/null +++ b/vendor/github.com/armon/go-metrics/sink_test.go @@ -0,0 +1,120 @@ +package metrics + +import ( + "reflect" + "testing" +) + +type MockSink struct { + keys [][]string + vals []float32 +} + +func (m *MockSink) SetGauge(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} +func (m *MockSink) EmitKey(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} +func (m *MockSink) IncrCounter(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} +func (m *MockSink) AddSample(key []string, val float32) { + m.keys = append(m.keys, key) + m.vals = append(m.vals, val) +} + +func TestFanoutSink_Gauge(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := 
[]string{"test"} + v := float32(42.0) + fh.SetGauge(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func TestFanoutSink_Key(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := []string{"test"} + v := float32(42.0) + fh.EmitKey(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func TestFanoutSink_Counter(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := []string{"test"} + v := float32(42.0) + fh.IncrCounter(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func TestFanoutSink_Sample(t *testing.T) { + m1 := &MockSink{} + m2 := &MockSink{} + fh := &FanoutSink{m1, m2} + + k := []string{"test"} + v := float32(42.0) + fh.AddSample(k, v) + + if !reflect.DeepEqual(m1.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m2.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m1.vals[0], v) { + t.Fatalf("val not equal") + } + if !reflect.DeepEqual(m2.vals[0], v) { + t.Fatalf("val not equal") + } +} diff --git a/vendor/github.com/armon/go-metrics/start_test.go b/vendor/github.com/armon/go-metrics/start_test.go new file mode 100755 index 0000000000..8b3210c15f --- /dev/null +++ b/vendor/github.com/armon/go-metrics/start_test.go @@ -0,0 +1,110 @@ +package metrics + +import ( + "reflect" + "testing" + "time" +) + +func TestDefaultConfig(t *testing.T) { + conf := DefaultConfig("service") + if conf.ServiceName != "service" { + t.Fatalf("Bad name") + } + if conf.HostName == "" { + t.Fatalf("missing hostname") + } + if !conf.EnableHostname || !conf.EnableRuntimeMetrics { + t.Fatalf("expect true") + } + if conf.EnableTypePrefix { + t.Fatalf("expect false") + } + if conf.TimerGranularity != time.Millisecond { + t.Fatalf("bad granularity") + } + if conf.ProfileInterval != time.Second { + t.Fatalf("bad interval") + } +} + +func Test_GlobalMetrics_SetGauge(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + SetGauge(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_EmitKey(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + EmitKey(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_IncrCounter(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + IncrCounter(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + 
t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_AddSample(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + + k := []string{"test"} + v := float32(42.0) + AddSample(k, v) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if !reflect.DeepEqual(m.vals[0], v) { + t.Fatalf("val not equal") + } +} + +func Test_GlobalMetrics_MeasureSince(t *testing.T) { + m := &MockSink{} + globalMetrics = &Metrics{sink: m} + globalMetrics.TimerGranularity = time.Millisecond + + k := []string{"test"} + now := time.Now() + MeasureSince(k, now) + + if !reflect.DeepEqual(m.keys[0], k) { + t.Fatalf("key not equal") + } + if m.vals[0] > 0.1 { + t.Fatalf("val too large %v", m.vals[0]) + } +} diff --git a/vendor/github.com/armon/go-metrics/statsd_test.go b/vendor/github.com/armon/go-metrics/statsd_test.go new file mode 100644 index 0000000000..622eb5d3aa --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsd_test.go @@ -0,0 +1,105 @@ +package metrics + +import ( + "bufio" + "bytes" + "net" + "testing" + "time" +) + +func TestStatsd_Flatten(t *testing.T) { + s := &StatsdSink{} + flat := s.flattenKey([]string{"a", "b", "c", "d"}) + if flat != "a.b.c.d" { + t.Fatalf("Bad flat") + } +} + +func TestStatsd_PushFullQueue(t *testing.T) { + q := make(chan string, 1) + q <- "full" + + s := &StatsdSink{metricQueue: q} + s.pushMetric("omit") + + out := <-q + if out != "full" { + t.Fatalf("bad val %v", out) + } + + select { + case v := <-q: + t.Fatalf("bad val %v", v) + default: + } +} + +func TestStatsd_Conn(t *testing.T) { + addr := "127.0.0.1:7524" + done := make(chan bool) + go func() { + list, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 7524}) + if err != nil { + panic(err) + } + defer list.Close() + buf := make([]byte, 1500) + n, err := list.Read(buf) + if err != nil { + panic(err) + } + buf = buf[:n] + reader := bufio.NewReader(bytes.NewReader(buf)) + + line, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "gauge.val:1.000000|g\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "key.other:2.000000|kv\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "counter.me:3.000000|c\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "sample.slow_thingy:4.000000|ms\n" { + t.Fatalf("bad line %s", line) + } + + done <- true + }() + s, err := NewStatsdSink(addr) + if err != nil { + t.Fatalf("bad error") + } + + s.SetGauge([]string{"gauge", "val"}, float32(1)) + s.EmitKey([]string{"key", "other"}, float32(2)) + s.IncrCounter([]string{"counter", "me"}, float32(3)) + s.AddSample([]string{"sample", "slow thingy"}, float32(4)) + + select { + case <-done: + s.Shutdown() + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } +} diff --git a/vendor/github.com/armon/go-metrics/statsite_test.go b/vendor/github.com/armon/go-metrics/statsite_test.go new file mode 100755 index 0000000000..d9c744f416 --- /dev/null +++ b/vendor/github.com/armon/go-metrics/statsite_test.go @@ -0,0 +1,101 @@ +package metrics + +import ( + "bufio" + "net" + "testing" + "time" +) + +func acceptConn(addr string) net.Conn { + ln, _ := net.Listen("tcp", addr) + conn, _ := ln.Accept() + return conn +} + +func 
TestStatsite_Flatten(t *testing.T) { + s := &StatsiteSink{} + flat := s.flattenKey([]string{"a", "b", "c", "d"}) + if flat != "a.b.c.d" { + t.Fatalf("Bad flat") + } +} + +func TestStatsite_PushFullQueue(t *testing.T) { + q := make(chan string, 1) + q <- "full" + + s := &StatsiteSink{metricQueue: q} + s.pushMetric("omit") + + out := <-q + if out != "full" { + t.Fatalf("bad val %v", out) + } + + select { + case v := <-q: + t.Fatalf("bad val %v", v) + default: + } +} + +func TestStatsite_Conn(t *testing.T) { + addr := "localhost:7523" + done := make(chan bool) + go func() { + conn := acceptConn(addr) + reader := bufio.NewReader(conn) + + line, err := reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "gauge.val:1.000000|g\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "key.other:2.000000|kv\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "counter.me:3.000000|c\n" { + t.Fatalf("bad line %s", line) + } + + line, err = reader.ReadString('\n') + if err != nil { + t.Fatalf("unexpected err %s", err) + } + if line != "sample.slow_thingy:4.000000|ms\n" { + t.Fatalf("bad line %s", line) + } + + conn.Close() + done <- true + }() + s, err := NewStatsiteSink(addr) + if err != nil { + t.Fatalf("bad error") + } + + s.SetGauge([]string{"gauge", "val"}, float32(1)) + s.EmitKey([]string{"key", "other"}, float32(2)) + s.IncrCounter([]string{"counter", "me"}, float32(3)) + s.AddSample([]string{"sample", "slow thingy"}, float32(4)) + + select { + case <-done: + s.Shutdown() + case <-time.After(3 * time.Second): + t.Fatalf("timeout") + } +} diff --git a/vendor/github.com/bitly/go-hostpool/.gitignore b/vendor/github.com/bitly/go-hostpool/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/bitly/go-hostpool/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/bitly/go-hostpool/.travis.yml b/vendor/github.com/bitly/go-hostpool/.travis.yml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/bitly/go-hostpool/example_test.go b/vendor/github.com/bitly/go-hostpool/example_test.go new file mode 100644 index 0000000000..88d0e558c2 --- /dev/null +++ b/vendor/github.com/bitly/go-hostpool/example_test.go @@ -0,0 +1,13 @@ +package hostpool + +import ( + "github.com/bitly/go-hostpool" +) + +func ExampleNewEpsilonGreedy() { + hp := hostpool.NewEpsilonGreedy([]string{"a", "b"}, 0, &hostpool.LinearEpsilonValueCalculator{}) + hostResponse := hp.Get() + _ = hostResponse.Host() // the hostname to make a request against + var err error // the result of that (elided) request; nil marks the host healthy + hostResponse.Mark(err) +} diff --git a/vendor/github.com/bitly/go-hostpool/hostpool_test.go b/vendor/github.com/bitly/go-hostpool/hostpool_test.go new file mode 100644 index 0000000000..e974aa74c5 --- /dev/null +++ b/vendor/github.com/bitly/go-hostpool/hostpool_test.go @@ -0,0 +1,145 @@ +package hostpool + +import ( + "errors" + "github.com/bmizerany/assert" + "io/ioutil" + "log" + "math/rand" + "os" + "testing" + "time" +) + +func TestHostPool(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer
log.SetOutput(os.Stdout) + + dummyErr := errors.New("Dummy Error") + + p := New([]string{"a", "b", "c"}) + assert.Equal(t, p.Get().Host(), "a") + assert.Equal(t, p.Get().Host(), "b") + assert.Equal(t, p.Get().Host(), "c") + respA := p.Get() + assert.Equal(t, respA.Host(), "a") + + respA.Mark(dummyErr) + respB := p.Get() + respB.Mark(dummyErr) + respC := p.Get() + assert.Equal(t, respC.Host(), "c") + respC.Mark(nil) + // get again, and verify that it's still c + assert.Equal(t, p.Get().Host(), "c") + // now try to mark b as success; should fail because already marked + respB.Mark(nil) + assert.Equal(t, p.Get().Host(), "c") // would be b if it were not dead + // now restore a + respA = &standardHostPoolResponse{host: "a", pool: p} + respA.Mark(nil) + assert.Equal(t, p.Get().Host(), "a") + assert.Equal(t, p.Get().Host(), "c") + + // ensure that we get *something* back when all hosts fail + for _, host := range []string{"a", "b", "c"} { + response := &standardHostPoolResponse{host: host, pool: p} + response.Mark(dummyErr) + } + resp := p.Get() + assert.NotEqual(t, resp, nil) +} + +type mockTimer struct { + t int // the time it will always return +} + +func (t *mockTimer) between(start time.Time, end time.Time) time.Duration { + return time.Duration(t.t) * time.Millisecond +} + +func TestEpsilonGreedy(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stdout) + + rand.Seed(10) + + iterations := 12000 + p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) + + timings := make(map[string]int64) + timings["a"] = 200 + timings["b"] = 300 + + hitCounts := make(map[string]int) + hitCounts["a"] = 0 + hitCounts["b"] = 0 + + log.Printf("starting first run (a, b)") + + for i := 0; i < iterations; i += 1 { + if i != 0 && i%100 == 0 { + p.performEpsilonGreedyDecay() + } + hostR := p.Get() + host := hostR.Host() + hitCounts[host]++ + timing := timings[host] + p.timer = &mockTimer{t: int(timing)} + hostR.Mark(nil) + } + + for host := range hitCounts { + log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0) + } + + assert.Equal(t, hitCounts["a"] > hitCounts["b"], true) + + hitCounts["a"] = 0 + hitCounts["b"] = 0 + log.Printf("starting second run (b, a)") + timings["a"] = 500 + timings["b"] = 100 + + for i := 0; i < iterations; i += 1 { + if i != 0 && i%100 == 0 { + p.performEpsilonGreedyDecay() + } + hostR := p.Get() + host := hostR.Host() + hitCounts[host]++ + timing := timings[host] + p.timer = &mockTimer{t: int(timing)} + hostR.Mark(nil) + } + + for host := range hitCounts { + log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0) + } + + assert.Equal(t, hitCounts["b"] > hitCounts["a"], true) +} + +func BenchmarkEpsilonGreedy(b *testing.B) { + b.StopTimer() + + // Make up some response times + zipfDist := rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 5, 5000) + timings := make([]uint64, b.N) + for i := 0; i < b.N; i++ { + timings[i] = zipfDist.Uint64() + } + + // Make the hostpool with a few hosts + p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) + + b.StartTimer() + for i := 0; i < b.N; i++ { + if i != 0 && i%100 == 0 { + p.performEpsilonGreedyDecay() + } + hostR := p.Get() + p.timer = &mockTimer{t: int(timings[i])} + hostR.Mark(nil) + } +} diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore 
b/vendor/github.com/bsm/sarama-cluster/.gitignore new file mode 100644 index 0000000000..88113c5b27 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/.gitignore @@ -0,0 +1,4 @@ +*.log +*.pid +kafka*/ +vendor/ diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml new file mode 100644 index 0000000000..85882e507d --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go +go: + - 1.7.3 + - 1.6.3 +install: + - go get -u github.com/Masterminds/glide + - glide install +env: + - SCALA_VERSION=2.11 KAFKA_VERSION=0.9.0.1 GO15VENDOREXPERIMENT=1 + - SCALA_VERSION=2.11 KAFKA_VERSION=0.10.0.1 GO15VENDOREXPERIMENT=1 + - SCALA_VERSION=2.11 KAFKA_VERSION=0.10.1.0 GO15VENDOREXPERIMENT=1 +script: + - make default test-race diff --git a/vendor/github.com/bsm/sarama-cluster/balancer_test.go b/vendor/github.com/bsm/sarama-cluster/balancer_test.go new file mode 100644 index 0000000000..eb474e817a --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/balancer_test.go @@ -0,0 +1,124 @@ +package cluster + +import ( + "github.com/Shopify/sarama" + . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/extensions/table" + . "github.com/onsi/gomega" +) + +var _ = Describe("Notification", func() { + + It("should init and update", func() { + n := newNotification(map[string][]int32{ + "a": {1, 2, 3}, + "b": {4, 5}, + "c": {1, 2}, + }) + n.claim(map[string][]int32{ + "a": {3, 4}, + "b": {1, 2, 3, 4}, + "d": {3, 4}, + }) + Expect(n).To(Equal(&Notification{ + Claimed: map[string][]int32{"a": {4}, "b": {1, 2, 3}, "d": {3, 4}}, + Released: map[string][]int32{"a": {1, 2}, "b": {5}, "c": {1, 2}}, + Current: map[string][]int32{"a": {3, 4}, "b": {1, 2, 3, 4}, "d": {3, 4}}, + })) + }) + +}) + +var _ = Describe("balancer", func() { + var subject *balancer + + BeforeEach(func() { + client := &mockClient{ + topics: map[string][]int32{ + "one": {0, 1, 2, 3}, + "two": {0, 1, 2}, + "three": {0, 1}, + }, + } + + var err error + subject, err = newBalancerFromMeta(client, map[string]sarama.ConsumerGroupMemberMetadata{ + "b": sarama.ConsumerGroupMemberMetadata{Topics: []string{"three", "one"}}, + "a": sarama.ConsumerGroupMemberMetadata{Topics: []string{"one", "two"}}, + }) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should parse from meta data", func() { + Expect(subject.topics).To(HaveLen(3)) + }) + + It("should perform", func() { + Expect(subject.Perform(StrategyRange)).To(Equal(map[string]map[string][]int32{ + "a": {"one": {0, 1}, "two": {0, 1, 2}}, + "b": {"one": {2, 3}, "three": {0, 1}}, + })) + + Expect(subject.Perform(StrategyRoundRobin)).To(Equal(map[string]map[string][]int32{ + "a": {"one": {0, 2}, "two": {0, 1, 2}}, + "b": {"one": {1, 3}, "three": {0, 1}}, + })) + }) + +}) + +var _ = Describe("topicInfo", func() { + + DescribeTable("Ranges", + func(memberIDs []string, partitions []int32, expected map[string][]int32) { + info := topicInfo{MemberIDs: memberIDs, Partitions: partitions} + Expect(info.Ranges()).To(Equal(expected)) + }, + + Entry("three members, three partitions", []string{"M1", "M2", "M3"}, []int32{0, 1, 2}, map[string][]int32{ + "M1": {0}, "M2": {1}, "M3": {2}, + }), + Entry("member ID order", []string{"M3", "M1", "M2"}, []int32{0, 1, 2}, map[string][]int32{ + "M1": {0}, "M2": {1}, "M3": {2}, + }), + Entry("more members than partitions", []string{"M1", "M2", "M3"}, []int32{0, 1}, map[string][]int32{ + "M1": {0}, "M3": {1}, + }), + Entry("far more members than partitions", []string{"M1", 
"M2", "M3"}, []int32{0}, map[string][]int32{ + "M2": {0}, + }), + Entry("fewer members than partitions", []string{"M1", "M2", "M3"}, []int32{0, 1, 2, 3}, map[string][]int32{ + "M1": {0}, "M2": {1, 2}, "M3": {3}, + }), + Entry("uneven members/partitions ratio", []string{"M1", "M2", "M3"}, []int32{0, 2, 4, 6, 8}, map[string][]int32{ + "M1": {0, 2}, "M2": {4}, "M3": {6, 8}, + }), + ) + + DescribeTable("RoundRobin", + func(memberIDs []string, partitions []int32, expected map[string][]int32) { + info := topicInfo{MemberIDs: memberIDs, Partitions: partitions} + Expect(info.RoundRobin()).To(Equal(expected)) + }, + + Entry("three members, three partitions", []string{"M1", "M2", "M3"}, []int32{0, 1, 2}, map[string][]int32{ + "M1": {0}, "M2": {1}, "M3": {2}, + }), + Entry("member ID order", []string{"M3", "M1", "M2"}, []int32{0, 1, 2}, map[string][]int32{ + "M1": {0}, "M2": {1}, "M3": {2}, + }), + Entry("more members than partitions", []string{"M1", "M2", "M3"}, []int32{0, 1}, map[string][]int32{ + "M1": {0}, "M2": {1}, + }), + Entry("far more members than partitions", []string{"M1", "M2", "M3"}, []int32{0}, map[string][]int32{ + "M1": {0}, + }), + Entry("fewer members than partitions", []string{"M1", "M2", "M3"}, []int32{0, 1, 2, 3}, map[string][]int32{ + "M1": {0, 3}, "M2": {1}, "M3": {2}, + }), + Entry("uneven members/partitions ratio", []string{"M1", "M2", "M3"}, []int32{0, 2, 4, 6, 8}, map[string][]int32{ + "M1": {0, 6}, "M2": {2, 8}, "M3": {4}, + }), + ) + +}) diff --git a/vendor/github.com/bsm/sarama-cluster/cluster_test.go b/vendor/github.com/bsm/sarama-cluster/cluster_test.go new file mode 100644 index 0000000000..3d2ba9fbfb --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/cluster_test.go @@ -0,0 +1,192 @@ +package cluster + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/Shopify/sarama" + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +const ( + testGroup = "sarama-cluster-group" + testKafkaData = "/tmp/sarama-cluster-test" +) + +var ( + testKafkaRoot = "kafka_2.11-0.10.1.0" + testKafkaAddrs = []string{"127.0.0.1:29092"} + testTopics = []string{"topic-a", "topic-b"} + + testClient sarama.Client + testKafkaCmd, testZkCmd *exec.Cmd +) + +func init() { + if dir := os.Getenv("KAFKA_DIR"); dir != "" { + testKafkaRoot = dir + } +} + +var _ = Describe("offsetInfo", func() { + + It("should calculate next offset", func() { + Expect(offsetInfo{-2, ""}.NextOffset(sarama.OffsetOldest)).To(Equal(sarama.OffsetOldest)) + Expect(offsetInfo{-2, ""}.NextOffset(sarama.OffsetNewest)).To(Equal(sarama.OffsetNewest)) + Expect(offsetInfo{-1, ""}.NextOffset(sarama.OffsetOldest)).To(Equal(sarama.OffsetOldest)) + Expect(offsetInfo{-1, ""}.NextOffset(sarama.OffsetNewest)).To(Equal(sarama.OffsetNewest)) + Expect(offsetInfo{0, ""}.NextOffset(sarama.OffsetOldest)).To(Equal(int64(0))) + Expect(offsetInfo{100, ""}.NextOffset(sarama.OffsetOldest)).To(Equal(int64(100))) + }) + +}) + +var _ = Describe("int32Slice", func() { + + It("should diff", func() { + Expect(((int32Slice)(nil)).Diff(int32Slice{1, 3, 5})).To(BeNil()) + Expect(int32Slice{1, 3, 5}.Diff((int32Slice)(nil))).To(Equal([]int32{1, 3, 5})) + Expect(int32Slice{1, 3, 5}.Diff(int32Slice{1, 3, 5})).To(BeNil()) + Expect(int32Slice{1, 3, 5}.Diff(int32Slice{1, 2, 3, 4, 5})).To(BeNil()) + Expect(int32Slice{1, 3, 5}.Diff(int32Slice{2, 3, 4})).To(Equal([]int32{1, 5})) + Expect(int32Slice{1, 3, 5}.Diff(int32Slice{1, 4})).To(Equal([]int32{3, 5})) + Expect(int32Slice{1, 3, 5}.Diff(int32Slice{2, 5})).To(Equal([]int32{1, 3})) + }) + +}) + +// -------------------------------------------------------------------- + +var _ = BeforeSuite(func() { + testZkCmd = exec.Command( + testDataDir(testKafkaRoot, "bin", "kafka-run-class.sh"), + "org.apache.zookeeper.server.quorum.QuorumPeerMain", + testDataDir("zookeeper.properties"), + ) + testZkCmd.Env = []string{"KAFKA_HEAP_OPTS=-Xmx512M -Xms512M"} + // testZkCmd.Stderr = os.Stderr + // testZkCmd.Stdout = os.Stdout + + testKafkaCmd = exec.Command( + testDataDir(testKafkaRoot, "bin", "kafka-run-class.sh"), + "-name", "kafkaServer", "kafka.Kafka", + testDataDir("server.properties"), + ) + testKafkaCmd.Env = []string{"KAFKA_HEAP_OPTS=-Xmx1G -Xms1G"} + // testKafkaCmd.Stderr = os.Stderr + // testKafkaCmd.Stdout = os.Stdout + + Expect(os.MkdirAll(testKafkaData, 0777)).NotTo(HaveOccurred()) + Expect(testZkCmd.Start()).NotTo(HaveOccurred()) + Expect(testKafkaCmd.Start()).NotTo(HaveOccurred()) + + // Wait for client + Eventually(func() error { + var err error + + // sync-producer requires Return.Successes set to true + testConf := sarama.NewConfig() + testConf.Producer.Return.Successes = true + testClient, err = sarama.NewClient(testKafkaAddrs, testConf) + return err + }, "10s", "1s").ShouldNot(HaveOccurred()) + + // Ensure we can retrieve partition info + Eventually(func() error { + _, err := testClient.Partitions(testTopics[0]) + return err + }, "10s", "500ms").ShouldNot(HaveOccurred()) + + // Seed a few messages + Expect(testSeed(1000)).NotTo(HaveOccurred()) +}) + +var _ = AfterSuite(func() { + _ = testClient.Close() + + _ = testKafkaCmd.Process.Kill() + _ = testZkCmd.Process.Kill() + _ = testKafkaCmd.Wait() + _ = testZkCmd.Wait() + _ = os.RemoveAll(testKafkaData) +}) + +// -------------------------------------------------------------------- + +func TestSuite(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "sarama/cluster") +} + +func 
testDataDir(tokens ...string) string { + tokens = append([]string{"testdata"}, tokens...) + return filepath.Join(tokens...) +} + +// Seed messages +func testSeed(n int) error { + producer, err := sarama.NewSyncProducerFromClient(testClient) + if err != nil { + return err + } + + for i := 0; i < n; i++ { + kv := sarama.StringEncoder(fmt.Sprintf("PLAINDATA-%08d", i)) + for _, t := range testTopics { + msg := &sarama.ProducerMessage{Topic: t, Key: kv, Value: kv} + if _, _, err := producer.SendMessage(msg); err != nil { + return err + } + } + } + return producer.Close() +} + +type testConsumerMessage struct { + sarama.ConsumerMessage + ConsumerID string +} + +// -------------------------------------------------------------------- + +var _ sarama.Consumer = &mockConsumer{} +var _ sarama.PartitionConsumer = &mockPartitionConsumer{} + +type mockClient struct { + sarama.Client + + topics map[string][]int32 +} +type mockConsumer struct{ sarama.Consumer } +type mockPartitionConsumer struct { + sarama.PartitionConsumer + + Topic string + Partition int32 + Offset int64 +} + +func (m *mockClient) Partitions(t string) ([]int32, error) { + pts, ok := m.topics[t] + if !ok { + return nil, sarama.ErrInvalidTopic + } + return pts, nil +} + +func (*mockConsumer) ConsumePartition(topic string, partition int32, offset int64) (sarama.PartitionConsumer, error) { + if offset > -1 && offset < 1000 { + return nil, sarama.ErrOffsetOutOfRange + } + return &mockPartitionConsumer{ + Topic: topic, + Partition: partition, + Offset: offset, + }, nil +} + +func (*mockPartitionConsumer) Close() error { return nil } diff --git a/vendor/github.com/bsm/sarama-cluster/config_test.go b/vendor/github.com/bsm/sarama-cluster/config_test.go new file mode 100644 index 0000000000..558cd90f97 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/config_test.go @@ -0,0 +1,25 @@ +package cluster + +import ( + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("Config", func() { + var subject *Config + + BeforeEach(func() { + subject = NewConfig() + }) + + It("should init", func() { + Expect(subject.Group.Session.Timeout).To(Equal(30 * time.Second)) + Expect(subject.Group.Heartbeat.Interval).To(Equal(3 * time.Second)) + Expect(subject.Group.Return.Notifications).To(BeFalse()) + Expect(subject.Metadata.Retry.Max).To(Equal(3)) + // Expect(subject.Config.Version).To(Equal(sarama.V0_9_0_0)) + }) + +}) diff --git a/vendor/github.com/bsm/sarama-cluster/consumer_test.go b/vendor/github.com/bsm/sarama-cluster/consumer_test.go new file mode 100644 index 0000000000..45f5ca726d --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/consumer_test.go @@ -0,0 +1,208 @@ +package cluster + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Consumer", func() { + + var newConsumer = func(group string) (*Consumer, error) { + config := NewConfig() + config.Consumer.Return.Errors = true + return NewConsumer(testKafkaAddrs, group, testTopics, config) + } + + var newConsumerOf = func(group string, topics ...string) (*Consumer, error) { + config := NewConfig() + config.Consumer.Return.Errors = true + return NewConsumer(testKafkaAddrs, group, topics, config) + } + + var subscriptionsOf = func(c *Consumer) GomegaAsyncAssertion { + return Eventually(func() map[string][]int32 { + return c.Subscriptions() + }, "10s", "100ms") + } + + var consume = func(consumerID, group string, max int, out chan *testConsumerMessage) { + go func() { + defer GinkgoRecover() + + cs, err := newConsumer(group) + Expect(err).NotTo(HaveOccurred()) + defer cs.Close() + cs.consumerID = consumerID + + for msg := range cs.Messages() { + out <- &testConsumerMessage{*msg, consumerID} + cs.MarkOffset(msg, "") + + if max--; max == 0 { + return + } + } + }() + } + + It("should init and share", func() { + // start CS1 + cs1, err := newConsumer(testGroup) + Expect(err).NotTo(HaveOccurred()) + + // CS1 should consume all 8 partitions + subscriptionsOf(cs1).Should(Equal(map[string][]int32{ + "topic-a": {0, 1, 2, 3}, + "topic-b": {0, 1, 2, 3}, + })) + + // start CS2 + cs2, err := newConsumer(testGroup) + Expect(err).NotTo(HaveOccurred()) + defer cs2.Close() + + // CS1 and CS2 should consume 4 partitions each + subscriptionsOf(cs1).Should(HaveLen(2)) + subscriptionsOf(cs1).Should(HaveKeyWithValue("topic-a", HaveLen(2))) + subscriptionsOf(cs1).Should(HaveKeyWithValue("topic-b", HaveLen(2))) + + subscriptionsOf(cs2).Should(HaveLen(2)) + subscriptionsOf(cs2).Should(HaveKeyWithValue("topic-a", HaveLen(2))) + subscriptionsOf(cs2).Should(HaveKeyWithValue("topic-b", HaveLen(2))) + + // shutdown CS1, now CS2 should consume all 8 partitions + Expect(cs1.Close()).NotTo(HaveOccurred()) + subscriptionsOf(cs2).Should(Equal(map[string][]int32{ + "topic-a": {0, 1, 2, 3}, + "topic-b": {0, 1, 2, 3}, + })) + }) + + It("should allow more consumers than partitions", func() { + cs1, err := newConsumerOf(testGroup, "topic-a") + Expect(err).NotTo(HaveOccurred()) + defer cs1.Close() + cs2, err := newConsumerOf(testGroup, "topic-a") + Expect(err).NotTo(HaveOccurred()) + defer cs2.Close() + cs3, err := newConsumerOf(testGroup, "topic-a") + Expect(err).NotTo(HaveOccurred()) + defer cs3.Close() + cs4, err := newConsumerOf(testGroup, "topic-a") + Expect(err).NotTo(HaveOccurred()) + + // start 4 consumers, one for each partition + subscriptionsOf(cs1).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + subscriptionsOf(cs2).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + subscriptionsOf(cs3).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + subscriptionsOf(cs4).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + + // add a 5th consumer + cs5, err := newConsumerOf(testGroup, "topic-a") + Expect(err).NotTo(HaveOccurred()) + defer cs5.Close() + + // make sure no errors occurred + Expect(cs1.Errors()).ShouldNot(Receive()) + Expect(cs2.Errors()).ShouldNot(Receive()) + Expect(cs3.Errors()).ShouldNot(Receive()) + Expect(cs4.Errors()).ShouldNot(Receive()) + Expect(cs5.Errors()).ShouldNot(Receive()) + + // close 4th, make sure the 5th takes over + Expect(cs4.Close()).To(Succeed()) + subscriptionsOf(cs1).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + subscriptionsOf(cs2).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + 
subscriptionsOf(cs3).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + subscriptionsOf(cs4).Should(BeEmpty()) + subscriptionsOf(cs5).Should(HaveKeyWithValue("topic-a", HaveLen(1))) + + // there should still be no errors + Expect(cs1.Errors()).ShouldNot(Receive()) + Expect(cs2.Errors()).ShouldNot(Receive()) + Expect(cs3.Errors()).ShouldNot(Receive()) + Expect(cs4.Errors()).ShouldNot(Receive()) + Expect(cs5.Errors()).ShouldNot(Receive()) + }) + + It("should be allowed to subscribe to partitions that do not exist (yet)", func() { + cs, err := newConsumerOf(testGroup, append([]string{"topic-c"}, testTopics...)...) + Expect(err).NotTo(HaveOccurred()) + defer cs.Close() + subscriptionsOf(cs).Should(Equal(map[string][]int32{ + "topic-a": {0, 1, 2, 3}, + "topic-b": {0, 1, 2, 3}, + })) + }) + + It("should support manual mark/commit", func() { + cs, err := newConsumerOf(testGroup, "topic-a") + Expect(err).NotTo(HaveOccurred()) + defer cs.Close() + + subscriptionsOf(cs).Should(Equal(map[string][]int32{ + "topic-a": {0, 1, 2, 3}}, + )) + + cs.MarkPartitionOffset("topic-a", 1, 3, "") + cs.MarkPartitionOffset("topic-a", 2, 4, "") + Expect(cs.CommitOffsets()).NotTo(HaveOccurred()) + + offsets, err := cs.fetchOffsets(cs.Subscriptions()) + Expect(err).NotTo(HaveOccurred()) + Expect(offsets).To(Equal(map[string]map[int32]offsetInfo{ + "topic-a": {0: {Offset: -1}, 1: {Offset: 4}, 2: {Offset: 5}, 3: {Offset: -1}}, + })) + }) + + It("should consume/commit/resume", func() { + acc := make(chan *testConsumerMessage, 150000) + consume("A", "fuzzing", 1500, acc) + consume("B", "fuzzing", 2000, acc) + consume("C", "fuzzing", 1500, acc) + consume("D", "fuzzing", 200, acc) + consume("E", "fuzzing", 100, acc) + + Expect(testSeed(5000)).NotTo(HaveOccurred()) + Eventually(func() int { return len(acc) }, "30s", "100ms").Should(BeNumerically(">=", 5000)) + + consume("F", "fuzzing", 300, acc) + consume("G", "fuzzing", 400, acc) + consume("H", "fuzzing", 1000, acc) + consume("I", "fuzzing", 2000, acc) + Expect(testSeed(5000)).NotTo(HaveOccurred()) + Eventually(func() int { return len(acc) }, "30s", "100ms").Should(BeNumerically(">=", 8000)) + + consume("J", "fuzzing", 1000, acc) + Expect(testSeed(5000)).NotTo(HaveOccurred()) + Eventually(func() int { return len(acc) }, "30s", "100ms").Should(BeNumerically(">=", 9000)) + + consume("K", "fuzzing", 1000, acc) + consume("L", "fuzzing", 3000, acc) + Expect(testSeed(5000)).NotTo(HaveOccurred()) + Eventually(func() int { return len(acc) }, "30s", "100ms").Should(BeNumerically(">=", 12000)) + + consume("M", "fuzzing", 1000, acc) + Expect(testSeed(5000)).NotTo(HaveOccurred()) + Eventually(func() int { return len(acc) }, "30s", "100ms").Should(BeNumerically(">=", 15000)) + + close(acc) + + uniques := make(map[string][]string) + for msg := range acc { + key := fmt.Sprintf("%s/%d/%d", msg.Topic, msg.Partition, msg.Offset) + uniques[key] = append(uniques[key], msg.ConsumerID) + } + Expect(uniques).To(HaveLen(15000)) + }) + + It("should allow close to be called multiple times", func() { + cs, err := newConsumer(testGroup) + Expect(err).NotTo(HaveOccurred()) + Expect(cs.Close()).NotTo(HaveOccurred()) + Expect(cs.Close()).NotTo(HaveOccurred()) + }) + +}) diff --git a/vendor/github.com/bsm/sarama-cluster/partitions_test.go b/vendor/github.com/bsm/sarama-cluster/partitions_test.go new file mode 100644 index 0000000000..0b02fbeff9 --- /dev/null +++ b/vendor/github.com/bsm/sarama-cluster/partitions_test.go @@ -0,0 +1,132 @@ +package cluster + +import ( + "github.com/Shopify/sarama" + . 
"github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var _ = Describe("partitionConsumer", func() { + var subject *partitionConsumer + + BeforeEach(func() { + var err error + subject, err = newPartitionConsumer(&mockConsumer{}, "topic", 0, offsetInfo{2000, "m3ta"}, sarama.OffsetOldest) + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + close(subject.dead) + Expect(subject.Close()).NotTo(HaveOccurred()) + }) + + It("should set state", func() { + Expect(subject.State()).To(Equal(partitionState{ + Info: offsetInfo{2000, "m3ta"}, + Dirty: false, + })) + }) + + It("should recover from default offset if requested offset is out of bounds", func() { + pc, err := newPartitionConsumer(&mockConsumer{}, "topic", 0, offsetInfo{200, "m3ta"}, sarama.OffsetOldest) + Expect(err).NotTo(HaveOccurred()) + defer pc.Close() + close(pc.dead) + + state := pc.State() + Expect(state.Info.Offset).To(Equal(int64(-1))) + Expect(state.Info.Metadata).To(Equal("m3ta")) + }) + + It("should update state", func() { + subject.MarkOffset(2001, "met@") // should set state + Expect(subject.State()).To(Equal(partitionState{ + Info: offsetInfo{2001, "met@"}, + Dirty: true, + })) + + subject.MarkCommitted(2001) // should reset dirty status + Expect(subject.State()).To(Equal(partitionState{ + Info: offsetInfo{2001, "met@"}, + Dirty: false, + })) + + subject.MarkOffset(2001, "me7a") // should not update state + Expect(subject.State()).To(Equal(partitionState{ + Info: offsetInfo{2001, "met@"}, + Dirty: false, + })) + + subject.MarkOffset(2002, "me7a") // should bump state + Expect(subject.State()).To(Equal(partitionState{ + Info: offsetInfo{2002, "me7a"}, + Dirty: true, + })) + + subject.MarkCommitted(2001) // should not unset state + Expect(subject.State()).To(Equal(partitionState{ + Info: offsetInfo{2002, "me7a"}, + Dirty: true, + })) + }) + + It("should not fail when nil", func() { + blank := (*partitionConsumer)(nil) + Expect(func() { + _ = blank.State() + blank.MarkOffset(2001, "met@") + blank.MarkCommitted(2001) + }).NotTo(Panic()) + }) + +}) + +var _ = Describe("partitionMap", func() { + var subject *partitionMap + + BeforeEach(func() { + subject = newPartitionMap() + }) + + It("should fetch/store", func() { + Expect(subject.Fetch("topic", 0)).To(BeNil()) + + pc, err := newPartitionConsumer(&mockConsumer{}, "topic", 0, offsetInfo{2000, "m3ta"}, sarama.OffsetNewest) + Expect(err).NotTo(HaveOccurred()) + + subject.Store("topic", 0, pc) + Expect(subject.Fetch("topic", 0)).To(Equal(pc)) + Expect(subject.Fetch("topic", 1)).To(BeNil()) + Expect(subject.Fetch("other", 0)).To(BeNil()) + }) + + It("should return info", func() { + pc0, err := newPartitionConsumer(&mockConsumer{}, "topic", 0, offsetInfo{2000, "m3ta"}, sarama.OffsetNewest) + Expect(err).NotTo(HaveOccurred()) + pc1, err := newPartitionConsumer(&mockConsumer{}, "topic", 1, offsetInfo{2000, "m3ta"}, sarama.OffsetNewest) + Expect(err).NotTo(HaveOccurred()) + subject.Store("topic", 0, pc0) + subject.Store("topic", 1, pc1) + + info := subject.Info() + Expect(info).To(HaveLen(1)) + Expect(info).To(HaveKeyWithValue("topic", []int32{0, 1})) + }) + + It("should create snapshots", func() { + pc0, err := newPartitionConsumer(&mockConsumer{}, "topic", 0, offsetInfo{2000, "m3ta"}, sarama.OffsetNewest) + Expect(err).NotTo(HaveOccurred()) + pc1, err := newPartitionConsumer(&mockConsumer{}, "topic", 1, offsetInfo{2000, "m3ta"}, sarama.OffsetNewest) + Expect(err).NotTo(HaveOccurred()) + + subject.Store("topic", 0, pc0) + subject.Store("topic", 1, pc1) + 
subject.Fetch("topic", 1).MarkOffset(2001, "met@") + + Expect(subject.Snapshot()).To(Equal(map[topicPartition]partitionState{ + topicPartition{"topic", 0}: {offsetInfo{2000, "m3ta"}, false}, + topicPartition{"topic", 1}: {offsetInfo{2001, "met@"}, true}, + })) + }) + +}) diff --git a/vendor/github.com/codahale/hdrhistogram/.travis.yml b/vendor/github.com/codahale/hdrhistogram/.travis.yml new file mode 100644 index 0000000000..7960fc95b8 --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.5 + - 1.6 + - tip diff --git a/vendor/github.com/codahale/hdrhistogram/hdr_test.go b/vendor/github.com/codahale/hdrhistogram/hdr_test.go new file mode 100644 index 0000000000..309f0ea896 --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/hdr_test.go @@ -0,0 +1,388 @@ +package hdrhistogram_test + +import ( + "math" + "reflect" + "testing" + + "github.com/codahale/hdrhistogram" +) + +func TestHighSigFig(t *testing.T) { + input := []int64{ + 459876, 669187, 711612, 816326, 931423, 1033197, 1131895, 2477317, + 3964974, 12718782, + } + + hist := hdrhistogram.New(459876, 12718782, 5) + for _, sample := range input { + hist.RecordValue(sample) + } + + if v, want := hist.ValueAtQuantile(50), int64(1048575); v != want { + t.Errorf("Median was %v, but expected %v", v, want) + } +} + +func TestValueAtQuantile(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + data := []struct { + q float64 + v int64 + }{ + {q: 50, v: 500223}, + {q: 75, v: 750079}, + {q: 90, v: 900095}, + {q: 95, v: 950271}, + {q: 99, v: 990207}, + {q: 99.9, v: 999423}, + {q: 99.99, v: 999935}, + } + + for _, d := range data { + if v := h.ValueAtQuantile(d.q); v != d.v { + t.Errorf("P%v was %v, but expected %v", d.q, v, d.v) + } + } +} + +func TestMean(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + if v, want := h.Mean(), 500000.013312; v != want { + t.Errorf("Mean was %v, but expected %v", v, want) + } +} + +func TestStdDev(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + if v, want := h.StdDev(), 288675.1403682715; v != want { + t.Errorf("StdDev was %v, but expected %v", v, want) + } +} + +func TestTotalCount(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + if v, want := h.TotalCount(), int64(i+1); v != want { + t.Errorf("TotalCount was %v, but expected %v", v, want) + } + } +} + +func TestMax(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + if v, want := h.Max(), int64(1000447); v != want { + t.Errorf("Max was %v, but expected %v", v, want) + } +} + +func TestReset(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + h.Reset() + + if v, want := h.Max(), int64(0); v != want { + t.Errorf("Max was %v, but expected %v", v, want) + } +} + +func TestMerge(t *testing.T) { + h1 := hdrhistogram.New(1, 1000, 3) + h2 := hdrhistogram.New(1, 1000, 3) + + for i := 0; i < 100; i++ { + if err := 
h1.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + for i := 100; i < 200; i++ { + if err := h2.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + h1.Merge(h2) + + if v, want := h1.ValueAtQuantile(50), int64(99); v != want { + t.Errorf("Median was %v, but expected %v", v, want) + } +} + +func TestMin(t *testing.T) { + h := hdrhistogram.New(1, 10000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + if v, want := h.Min(), int64(0); v != want { + t.Errorf("Min was %v, but expected %v", v, want) + } +} + +func TestByteSize(t *testing.T) { + h := hdrhistogram.New(1, 100000, 3) + + if v, want := h.ByteSize(), 65604; v != want { + t.Errorf("ByteSize was %v, but expected %d", v, want) + } +} + +func TestRecordCorrectedValue(t *testing.T) { + h := hdrhistogram.New(1, 100000, 3) + + if err := h.RecordCorrectedValue(10, 100); err != nil { + t.Fatal(err) + } + + if v, want := h.ValueAtQuantile(75), int64(10); v != want { + t.Errorf("Corrected value was %v, but expected %v", v, want) + } +} + +func TestRecordCorrectedValueStall(t *testing.T) { + h := hdrhistogram.New(1, 100000, 3) + + if err := h.RecordCorrectedValue(1000, 100); err != nil { + t.Fatal(err) + } + + if v, want := h.ValueAtQuantile(75), int64(800); v != want { + t.Errorf("Corrected value was %v, but expected %v", v, want) + } +} + +func TestCumulativeDistribution(t *testing.T) { + h := hdrhistogram.New(1, 100000000, 3) + + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + actual := h.CumulativeDistribution() + expected := []hdrhistogram.Bracket{ + hdrhistogram.Bracket{Quantile: 0, Count: 1, ValueAt: 0}, + hdrhistogram.Bracket{Quantile: 50, Count: 500224, ValueAt: 500223}, + hdrhistogram.Bracket{Quantile: 75, Count: 750080, ValueAt: 750079}, + hdrhistogram.Bracket{Quantile: 87.5, Count: 875008, ValueAt: 875007}, + hdrhistogram.Bracket{Quantile: 93.75, Count: 937984, ValueAt: 937983}, + hdrhistogram.Bracket{Quantile: 96.875, Count: 969216, ValueAt: 969215}, + hdrhistogram.Bracket{Quantile: 98.4375, Count: 984576, ValueAt: 984575}, + hdrhistogram.Bracket{Quantile: 99.21875, Count: 992256, ValueAt: 992255}, + hdrhistogram.Bracket{Quantile: 99.609375, Count: 996352, ValueAt: 996351}, + hdrhistogram.Bracket{Quantile: 99.8046875, Count: 998400, ValueAt: 998399}, + hdrhistogram.Bracket{Quantile: 99.90234375, Count: 999424, ValueAt: 999423}, + hdrhistogram.Bracket{Quantile: 99.951171875, Count: 999936, ValueAt: 999935}, + hdrhistogram.Bracket{Quantile: 99.9755859375, Count: 999936, ValueAt: 999935}, + hdrhistogram.Bracket{Quantile: 99.98779296875, Count: 999936, ValueAt: 999935}, + hdrhistogram.Bracket{Quantile: 99.993896484375, Count: 1000000, ValueAt: 1000447}, + hdrhistogram.Bracket{Quantile: 100, Count: 1000000, ValueAt: 1000447}, + } + + if !reflect.DeepEqual(actual, expected) { + t.Errorf("CF was %#v, but expected %#v", actual, expected) + } +} + +func TestDistribution(t *testing.T) { + h := hdrhistogram.New(8, 1024, 3) + + for i := 0; i < 1024; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + actual := h.Distribution() + if len(actual) != 128 { + t.Errorf("Number of bars seen was %v, expected was 128", len(actual)) + } + for _, b := range actual { + if b.Count != 8 { + t.Errorf("Count per bar seen was %v, expected was 8", b.Count) + } + } +} + +func TestNaN(t *testing.T) { + h := hdrhistogram.New(1, 100000, 3) + if math.IsNaN(h.Mean()) { + t.Error("mean is 
NaN") + } + if math.IsNaN(h.StdDev()) { + t.Error("stddev is NaN") + } +} + +func TestSignificantFigures(t *testing.T) { + const sigFigs = 4 + h := hdrhistogram.New(1, 10, sigFigs) + if h.SignificantFigures() != sigFigs { + t.Errorf("Significant figures was %v, expected %d", h.SignificantFigures(), sigFigs) + } +} + +func TestLowestTrackableValue(t *testing.T) { + const minVal = 2 + h := hdrhistogram.New(minVal, 10, 3) + if h.LowestTrackableValue() != minVal { + t.Errorf("LowestTrackableValue figures was %v, expected %d", h.LowestTrackableValue(), minVal) + } +} + +func TestHighestTrackableValue(t *testing.T) { + const maxVal = 11 + h := hdrhistogram.New(1, maxVal, 3) + if h.HighestTrackableValue() != maxVal { + t.Errorf("HighestTrackableValue figures was %v, expected %d", h.HighestTrackableValue(), maxVal) + } +} + +func BenchmarkHistogramRecordValue(b *testing.B) { + h := hdrhistogram.New(1, 10000000, 3) + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + b.Fatal(err) + } + } + b.ResetTimer() + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + h.RecordValue(100) + } +} + +func BenchmarkNew(b *testing.B) { + b.ReportAllocs() + + for i := 0; i < b.N; i++ { + hdrhistogram.New(1, 120000, 3) // this could track 1ms-2min + } +} + +func TestUnitMagnitudeOverflow(t *testing.T) { + h := hdrhistogram.New(0, 200, 4) + if err := h.RecordValue(11); err != nil { + t.Fatal(err) + } +} + +func TestSubBucketMaskOverflow(t *testing.T) { + hist := hdrhistogram.New(2e7, 1e8, 5) + for _, sample := range [...]int64{1e8, 2e7, 3e7} { + hist.RecordValue(sample) + } + + for q, want := range map[float64]int64{ + 50: 33554431, + 83.33: 33554431, + 83.34: 100663295, + 99: 100663295, + } { + if got := hist.ValueAtQuantile(q); got != want { + t.Errorf("got %d for %fth percentile. 
want: %d", got, q, want) + } + } +} + +func TestExportImport(t *testing.T) { + min := int64(1) + max := int64(10000000) + sigfigs := 3 + h := hdrhistogram.New(min, max, sigfigs) + for i := 0; i < 1000000; i++ { + if err := h.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + s := h.Export() + + if v := s.LowestTrackableValue; v != min { + t.Errorf("LowestTrackableValue was %v, but expected %v", v, min) + } + + if v := s.HighestTrackableValue; v != max { + t.Errorf("HighestTrackableValue was %v, but expected %v", v, max) + } + + if v := int(s.SignificantFigures); v != sigfigs { + t.Errorf("SignificantFigures was %v, but expected %v", v, sigfigs) + } + + if imported := hdrhistogram.Import(s); !imported.Equals(h) { + t.Error("Expected Histograms to be equivalent") + } + +} + +func TestEquals(t *testing.T) { + h1 := hdrhistogram.New(1, 10000000, 3) + for i := 0; i < 1000000; i++ { + if err := h1.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + h2 := hdrhistogram.New(1, 10000000, 3) + for i := 0; i < 10000; i++ { + if err := h1.RecordValue(int64(i)); err != nil { + t.Fatal(err) + } + } + + if h1.Equals(h2) { + t.Error("Expected Histograms to not be equivalent") + } + + h1.Reset() + h2.Reset() + + if !h1.Equals(h2) { + t.Error("Expected Histograms to be equivalent") + } +} diff --git a/vendor/github.com/codahale/hdrhistogram/window_test.go b/vendor/github.com/codahale/hdrhistogram/window_test.go new file mode 100644 index 0000000000..7e787588af --- /dev/null +++ b/vendor/github.com/codahale/hdrhistogram/window_test.go @@ -0,0 +1,64 @@ +package hdrhistogram_test + +import ( + "testing" + + "github.com/codahale/hdrhistogram" +) + +func TestWindowedHistogram(t *testing.T) { + w := hdrhistogram.NewWindowed(2, 1, 1000, 3) + + for i := 0; i < 100; i++ { + w.Current.RecordValue(int64(i)) + } + w.Rotate() + + for i := 100; i < 200; i++ { + w.Current.RecordValue(int64(i)) + } + w.Rotate() + + for i := 200; i < 300; i++ { + w.Current.RecordValue(int64(i)) + } + + if v, want := w.Merge().ValueAtQuantile(50), int64(199); v != want { + t.Errorf("Median was %v, but expected %v", v, want) + } +} + +func BenchmarkWindowedHistogramRecordAndRotate(b *testing.B) { + w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + if err := w.Current.RecordValue(100); err != nil { + b.Fatal(err) + } + + if i%100000 == 1 { + w.Rotate() + } + } +} + +func BenchmarkWindowedHistogramMerge(b *testing.B) { + w := hdrhistogram.NewWindowed(3, 1, 10000000, 3) + for i := 0; i < 10000000; i++ { + if err := w.Current.RecordValue(100); err != nil { + b.Fatal(err) + } + + if i%100000 == 1 { + w.Rotate() + } + } + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + w.Merge() + } +} diff --git a/vendor/github.com/codeskyblue/go-uuid/dce.go b/vendor/github.com/codeskyblue/go-uuid/dce.go deleted file mode 100755 index 50a0f2d099..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/dce.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. 
-// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) UUID { - uuid := NewUUID() - if uuid != nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCEPerson(Person, uint32(os.Getuid())) -func NewDCEPerson() UUID { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCEGroup(Group, uint32(os.Getgid())) -func NewDCEGroup() UUID { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID or false. -func (uuid UUID) Domain() (Domain, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return Domain(uuid[9]), true -} - -// Id returns the id for a Version 2 UUID or false. -func (uuid UUID) Id() (uint32, bool) { - if v, _ := uuid.Version(); v != 2 { - return 0, false - } - return binary.BigEndian.Uint32(uuid[0:4]), true -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/codeskyblue/go-uuid/doc.go b/vendor/github.com/codeskyblue/go-uuid/doc.go deleted file mode 100755 index d8bd013e68..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The uuid package generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. -package uuid diff --git a/vendor/github.com/codeskyblue/go-uuid/hash.go b/vendor/github.com/codeskyblue/go-uuid/hash.go deleted file mode 100644 index cdd4192fd9..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known Name Space IDs and UUIDs -var ( - NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") - NIL = Parse("00000000-0000-0000-0000-000000000000") -) - -// NewHash returns a new UUID dervied from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. 
-func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space) - h.Write([]byte(data)) - s := h.Sum(nil) - uuid := make([]byte, 16) - copy(uuid, s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/codeskyblue/go-uuid/node.go b/vendor/github.com/codeskyblue/go-uuid/node.go deleted file mode 100755 index dd0a8ac189..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/node.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "net" - -var ( - interfaces []net.Interface // cached list of interfaces - ifname string // name of interface being used - nodeID []byte // hardware for version 1 UUIDs -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil && name != "" { - return false - } - } - - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - if setNodeID(ifs.HardwareAddr) { - ifname = ifs.Name - return true - } - } - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - if nodeID == nil { - nodeID = make([]byte, 6) - } - randomBits(nodeID) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - if nodeID == nil { - SetNodeInterface("") - } - nid := make([]byte, 6) - copy(nid, nodeID) - return nid -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if setNodeID(id) { - ifname = "user" - return true - } - return false -} - -func setNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - if nodeID == nil { - nodeID = make([]byte, 6) - } - copy(nodeID, id) - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. 
-func (uuid UUID) NodeID() []byte { - if len(uuid) != 16 { - return nil - } - node := make([]byte, 6) - copy(node, uuid[10:]) - return node -} diff --git a/vendor/github.com/codeskyblue/go-uuid/time.go b/vendor/github.com/codeskyblue/go-uuid/time.go deleted file mode 100755 index ad467968c1..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/time.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. -type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - lasttime uint64 // last time we returned - clock_seq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// adjusts the clock sequence as needed. An error is returned if the current -// time cannot be determined. -func GetTime() (Time, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clock_seq == 0 { - SetClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence a new random -// clock sequence is generated the first time a clock sequence is requested by -// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated -// for -func ClockSequence() int { - if clock_seq == 0 { - SetClockSequence(-1) - } - return int(clock_seq & 0x3fff) -} - -// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - old_seq := clock_seq - clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if old_seq != clock_seq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. It returns false if uuid is not valid. The time is only well defined -// for version 1 and 2 UUIDs. -func (uuid UUID) Time() (Time, bool) { - if len(uuid) != 16 { - return 0, false - } - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time), true -} - -// ClockSequence returns the clock sequence encoded in uuid. 
It returns false -// if uuid is not valid. The clock sequence is only well defined for version 1 -// and 2 UUIDs. -func (uuid UUID) ClockSequence() (int, bool) { - if len(uuid) != 16 { - return 0, false - } - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true -} diff --git a/vendor/github.com/codeskyblue/go-uuid/util.go b/vendor/github.com/codeskyblue/go-uuid/util.go deleted file mode 100644 index de40b102c4..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = []byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts the the first two hex bytes of x into a byte. -func xtob(x string) (byte, bool) { - b1 := xvalues[x[0]] - b2 := xvalues[x[1]] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/codeskyblue/go-uuid/uuid.go b/vendor/github.com/codeskyblue/go-uuid/uuid.go deleted file mode 100755 index 2920fae632..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/uuid.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID []byte - -// A Version represents a UUIDs version. -type Version byte - -// A Variant represents a UUIDs variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. 
-) - -var rander = rand.Reader // random function - -// New returns a new random (version 4) UUID as a string. It is a convenience -// function for NewRandom().String(). -func New() string { - return NewRandom().String() -} - -// Parse decodes s into a UUID or returns nil. Both the UUID form of -// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. -func Parse(s string) UUID { - if len(s) == 36+9 { - if strings.ToLower(s[:9]) != "urn:uuid:" { - return nil - } - s = s[9:] - } else if len(s) != 36 { - return nil - } - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return nil - } - uuid := make([]byte, 16) - for i, x := range []int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - if v, ok := xtob(s[x:]); !ok { - return nil - } else { - uuid[i] = v - } - } - return uuid -} - -// Equal returns true if uuid1 and uuid2 are equal. -func Equal(uuid1, uuid2 UUID) bool { - return bytes.Equal(uuid1, uuid2) -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - if uuid == nil || len(uuid) != 16 { - return "" - } - b := []byte(uuid) - return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", - b[:4], b[4:6], b[6:8], b[8:10], b[10:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. -func (uuid UUID) URN() string { - if uuid == nil || len(uuid) != 16 { - return "" - } - b := []byte(uuid) - return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", - b[:4], b[4:6], b[6:8], b[8:10], b[10:]) -} - -// Variant returns the variant encoded in uuid. It returns Invalid if -// uuid is invalid. -func (uuid UUID) Variant() Variant { - if len(uuid) != 16 { - return Invalid - } - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } - panic("unreachable") -} - -// Version returns the verison of uuid. It returns false if uuid is not -// valid. -func (uuid UUID) Version() (Version, bool) { - if len(uuid) != 16 { - return 0, false - } - return Version(uuid[6] >> 4), true -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implents io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/codeskyblue/go-uuid/version1.go b/vendor/github.com/codeskyblue/go-uuid/version1.go deleted file mode 100644 index 63580044b6..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/version1.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil. -func NewUUID() UUID { - if nodeID == nil { - SetNodeInterface("") - } - - now, err := GetTime() - if err != nil { - return nil - } - - uuid := make([]byte, 16) - - time_low := uint32(now & 0xffffffff) - time_mid := uint16((now >> 32) & 0xffff) - time_hi := uint16((now >> 48) & 0x0fff) - time_hi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], time_low) - binary.BigEndian.PutUint16(uuid[4:], time_mid) - binary.BigEndian.PutUint16(uuid[6:], time_hi) - binary.BigEndian.PutUint16(uuid[8:], clock_seq) - copy(uuid[10:], nodeID) - - return uuid -} diff --git a/vendor/github.com/codeskyblue/go-uuid/version4.go b/vendor/github.com/codeskyblue/go-uuid/version4.go deleted file mode 100644 index b3d4a368dd..0000000000 --- a/vendor/github.com/codeskyblue/go-uuid/version4.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -// Random returns a Random (Version 4) UUID or panics. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 × 10−11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. 
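Aside: the NewUUID removed just above packs a 60-bit timestamp (RFC 4122 counts 100-nanosecond intervals since the Gregorian reform date, 1582-10-15, which is what the package's GetTime supplies) big-endian into the first eight bytes, with the version nibble OR-ed into the high field. A sketch of just that packing step under those assumptions; packV1Time is an illustrative helper and the tick count is arbitrary:

```Go
package main

import (
	"encoding/binary"
	"fmt"
)

// packV1Time fills the first 8 bytes of uuid with a 60-bit timestamp,
// mirroring the field layout of the removed NewUUID.
func packV1Time(uuid []byte, now uint64) {
	timeLow := uint32(now & 0xffffffff)
	timeMid := uint16((now >> 32) & 0xffff)
	timeHi := uint16((now>>48)&0x0fff) | 0x1000 // high nibble = version 1

	binary.BigEndian.PutUint32(uuid[0:], timeLow)
	binary.BigEndian.PutUint16(uuid[4:], timeMid)
	binary.BigEndian.PutUint16(uuid[6:], timeHi)
	// Bytes 8-9 carry the clock sequence and 10-15 the node ID,
	// which the removed code filled in from package state.
}

func main() {
	uuid := make([]byte, 16)
	packV1Time(uuid, 0x1d2e3f4a5b6c7d) // arbitrary sample tick count
	fmt.Printf("% x\n", uuid[:8])
}
```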
-func NewRandom() UUID { - uuid := make([]byte, 16) - randomBits([]byte(uuid)) - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid -} diff --git a/vendor/github.com/davecgh/go-spew/.gitignore b/vendor/github.com/davecgh/go-spew/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml new file mode 100644 index 0000000000..984e0736e7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -0,0 +1,14 @@ +language: go +go: + - 1.5.4 + - 1.6.3 + - 1.7 +install: + - go get -v golang.org/x/tools/cmd/cover +script: + - go test -v -tags=safe ./spew + - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov +after_success: + - go get -v github.com/mattn/goveralls + - export PATH=$PATH:$HOME/gopath/bin + - goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md new file mode 100644 index 0000000000..262430449b --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -0,0 +1,205 @@ +go-spew +======= + +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] +(https://coveralls.io/r/davecgh/go-spew?branch=master) + + +Go-spew implements a deep pretty printer for Go data structures to aid in +debugging. A comprehensive suite of tests with 100% test coverage is provided +to ensure proper functionality. See `test_coverage.txt` for the gocov coverage +report. Go-spew is licensed under the liberal ISC license, so it may be used in +open source or commercial projects. + +If you're interested in reading about how this package came to life and some +of the challenges involved in providing a deep pretty printer, there is a blog +post about it +[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/). + +## Documentation + +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] +(http://godoc.org/github.com/davecgh/go-spew/spew) + +Full `go doc` style documentation for the project can be viewed online without +installing this package by using the excellent GoDoc site here: +http://godoc.org/github.com/davecgh/go-spew/spew + +You can also view the documentation locally once the package is installed with +the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to +http://localhost:6060/pkg/github.com/davecgh/go-spew/spew + +## Installation + +```bash +$ go get -u github.com/davecgh/go-spew/spew +``` + +## Quick Start + +Add this import line to the file you're working in: + +```Go +import "github.com/davecgh/go-spew/spew" +``` + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + +```Go +spew.Dump(myVar1, myVar2, ...) +spew.Fdump(someWriter, myVar1, myVar2, ...) 
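+// Fdump writes to the supplied io.Writer; Sdump, next, returns the dump as a string.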
+str := spew.Sdump(myVar1, myVar2, ...) +``` + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most +compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types +and pointer addresses): + +```Go +spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) +spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) +``` + +## Debugging a Web Application Example + +Here is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. You should also only use this debugging technique in a development environment, never in production. + +```Go +package main + +import ( + "fmt" + "html" + "net/http" + + "github.com/davecgh/go-spew/spew" +) + +func handler(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + fmt.Fprintf(w, "Hi there, %s!", r.URL.Path[1:]) + fmt.Fprintf(w, "") +} + +func main() { + http.HandleFunc("/", handler) + http.ListenAndServe(":8080", nil) +} +``` + +## Sample Dump Output + +``` +(main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) { + (string) "one": (bool) true + } +} +([]uint8) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| +} +``` + +## Sample Formatter Output + +Double pointer to a uint8: +``` + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 +``` + +Pointer to circular struct with a uint8 field and a pointer to itself: +``` + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} +``` + +## Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available via the +spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +``` +* Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + +* MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + +* DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + +* DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. This option + relies on access to the unsafe package, so it will not have any effect when + running in environments without access to the unsafe package such as Google + App Engine or with the "safe" build tag specified. + Pointer method invocation is enabled by default. 
+ +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + +* ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + +* SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are supported, + with other types sorted according to the reflect.Value.String() output + which guarantees display stability. Natural map order is used by + default. + +* SpewKeys + SpewKeys specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only considered + if SortKeys is true. + +``` + +## Unsafe Package Dependency + +This package relies on the unsafe package to perform some of the more advanced +features, however it also supports a "limited" mode which allows it to work in +environments where the unsafe package is not available. By default, it will +operate in this mode on Google App Engine and when compiled with GopherJS. The +"safe" build tag may also be specified to force the package to build without +using the unsafe package. + +## License + +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/cov_report.sh b/vendor/github.com/davecgh/go-spew/cov_report.sh new file mode 100644 index 0000000000..9579497e41 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/cov_report.sh @@ -0,0 +1,22 @@ +#!/bin/sh + +# This script uses gocov to generate a test coverage report. +# The gocov tool my be obtained with the following command: +# go get github.com/axw/gocov/gocov +# +# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH. + +# Check for gocov. +if ! type gocov >/dev/null 2>&1; then + echo >&2 "This script requires the gocov tool." + echo >&2 "You may obtain it with the following command:" + echo >&2 "go get github.com/axw/gocov/gocov" + exit 1 +fi + +# Only run the cgo tests if gcc is installed. +if type gcc >/dev/null 2>&1; then + (cd spew && gocov test -tags testcgo | gocov report) +else + (cd spew && gocov test | gocov report) +fi diff --git a/vendor/github.com/davecgh/go-spew/spew/common_test.go b/vendor/github.com/davecgh/go-spew/spew/common_test.go new file mode 100644 index 0000000000..0f5ce47dca --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common_test.go @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +// custom type to test Stinger interface on non-pointer receiver. +type stringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with non-pointer receivers. +func (s stringer) String() string { + return "stringer " + string(s) +} + +// custom type to test Stinger interface on pointer receiver. +type pstringer string + +// String implements the Stringer interface for testing invocation of custom +// stringers on types with only pointer receivers. +func (s *pstringer) String() string { + return "stringer " + string(*s) +} + +// xref1 and xref2 are cross referencing structs for testing circular reference +// detection. +type xref1 struct { + ps2 *xref2 +} +type xref2 struct { + ps1 *xref1 +} + +// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular +// reference for testing detection. +type indirCir1 struct { + ps2 *indirCir2 +} +type indirCir2 struct { + ps3 *indirCir3 +} +type indirCir3 struct { + ps1 *indirCir1 +} + +// embed is used to test embedded structures. +type embed struct { + a string +} + +// embedwrap is used to test embedded structures. +type embedwrap struct { + *embed + e *embed +} + +// panicer is used to intentionally cause a panic for testing spew properly +// handles them +type panicer int + +func (p panicer) String() string { + panic("test panic") +} + +// customError is used to test custom error interface invocation. +type customError int + +func (e customError) Error() string { + return fmt.Sprintf("error: %d", int(e)) +} + +// stringizeWants converts a slice of wanted test output into a format suitable +// for a test error message. +func stringizeWants(wants []string) string { + s := "" + for i, want := range wants { + if i > 0 { + s += fmt.Sprintf("want%d: %s", i+1, want) + } else { + s += "want: " + want + } + } + return s +} + +// testFailed returns whether or not a test failed by checking if the result +// of the test is in the slice of wanted strings. +func testFailed(result string, wants []string) bool { + for _, want := range wants { + if result == want { + return false + } + } + return true +} + +type sortableStruct struct { + x int +} + +func (ss sortableStruct) String() string { + return fmt.Sprintf("ss.%d", ss.x) +} + +type unsortableStruct struct { + x int +} + +type sortTestCase struct { + input []reflect.Value + expected []reflect.Value +} + +func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) { + getInterfaces := func(values []reflect.Value) []interface{} { + interfaces := []interface{}{} + for _, v := range values { + interfaces = append(interfaces, v.Interface()) + } + return interfaces + } + + for _, test := range tests { + spew.SortValues(test.input, cs) + // reflect.DeepEqual cannot really make sense of reflect.Value, + // probably because of all the pointer tricks. For instance, + // v(2.0) != v(2.0) on a 32-bits system. Turn them into interface{} + // instead. 
+ input := getInterfaces(test.input) + expected := getInterfaces(test.expected) + if !reflect.DeepEqual(input, expected) { + t.Errorf("Sort mismatch:\n %v != %v", input, expected) + } + } +} + +// TestSortValues ensures the sort functionality for relect.Value based sorting +// works as intended. +func TestSortValues(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + embedA := v(embed{"a"}) + embedB := v(embed{"b"}) + embedC := v(embed{"c"}) + tests := []sortTestCase{ + // No values. + { + []reflect.Value{}, + []reflect.Value{}, + }, + // Bools. + { + []reflect.Value{v(false), v(true), v(false)}, + []reflect.Value{v(false), v(false), v(true)}, + }, + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Uints. + { + []reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))}, + []reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))}, + }, + // Floats. + { + []reflect.Value{v(2.0), v(1.0), v(3.0)}, + []reflect.Value{v(1.0), v(2.0), v(3.0)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // Array + { + []reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})}, + []reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})}, + }, + // Uintptrs. + { + []reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))}, + []reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))}, + }, + // SortableStructs. + { + // Note: not sorted - DisableMethods is set. + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + // Invalid. + { + []reflect.Value{embedB, embedA, embedC}, + []reflect.Value{embedB, embedA, embedC}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithMethods ensures the sort functionality for relect.Value +// based sorting works as intended when using string methods. +func TestSortValuesWithMethods(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. + { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + // Note: not sorted - SpewKeys is false. + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: false, SpewKeys: false} + helpTestSortValues(tests, &cs, t) +} + +// TestSortValuesWithSpew ensures the sort functionality for relect.Value +// based sorting works as intended when using spew to stringify keys. +func TestSortValuesWithSpew(t *testing.T) { + v := reflect.ValueOf + + a := v("a") + b := v("b") + c := v("c") + tests := []sortTestCase{ + // Ints. 
+ { + []reflect.Value{v(2), v(1), v(3)}, + []reflect.Value{v(1), v(2), v(3)}, + }, + // Strings. + { + []reflect.Value{b, a, c}, + []reflect.Value{a, b, c}, + }, + // SortableStructs. + { + []reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})}, + []reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})}, + }, + // UnsortableStructs. + { + []reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})}, + []reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})}, + }, + } + cs := spew.ConfigState{DisableMethods: true, SpewKeys: true} + helpTestSortValues(tests, &cs, t) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dump_test.go b/vendor/github.com/davecgh/go-spew/spew/dump_test.go new file mode 100644 index 0000000000..5aad9c7af0 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump_test.go @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Test Summary: +NOTE: For each test, a nil pointer, a single pointer and double pointer to the +base test element are also tested to ensure proper indirection across all types. 
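+
+(Each group below is registered by one of the add*DumpTests helper functions
+defined later in this file.)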
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Array containing bytes +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Slice containing bytes +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// dumpTest is used to describe a test to be performed against the Dump method. +type dumpTest struct { + in interface{} + wants []string +} + +// dumpTests houses all of the tests to be performed against the Dump method. +var dumpTests = make([]dumpTest, 0) + +// addDumpTest is a helper method to append the passed input and desired result +// to dumpTests +func addDumpTest(in interface{}, wants ...string) { + test := dumpTest{in, wants} + dumpTests = append(dumpTests, test) +} + +func addIntDumpTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max int16. + v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max int64. 
+ v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max int. + v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addUintDumpTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Max uint32. + v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Max uint. + v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addBoolDumpTests() { + // Boolean true. + v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFloatDumpTests() { + // Standard float32. 
+ v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard float64. + v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addComplexDumpTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addArrayDumpTests() { + // Array containing standard ints. + v := [3]int{1, 2, 3} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" + + vt + ") 2,\n (" + vt + ") 3\n}" + addDumpTest(v, "([3]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[3]"+vt+")()\n") + + // Array containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := [3]pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2sp := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") stringer 1,\n (" + v2t + + ") (len=" + v2i1Len + ") stringer 2,\n (" + v2t + + ") (len=" + v2i2Len + ") " + "stringer 3\n}" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + + ") (len=" + v2i0Len + ") \"1\",\n (" + v2t + ") (len=" + + v2i1Len + ") \"2\",\n (" + v2t + ") (len=" + v2i2Len + + ") " + "\"3\"\n}" + } + addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2sp+")\n") + addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2sp+")\n") + addDumpTest(nv2, "(*[3]"+v2t+")()\n") + + // Array containing interfaces. 
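+	// (Each interface element is dumped with its dynamic type, as the want
+	// string assembled below shows.)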
+ v3i0 := "one" + v3 := [3]interface{}{v3i0, int(2), uint(3)} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Array containing bytes. + v4 := [34]byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[34]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[34]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addSliceDumpTests() { + // Slice containing standard float32 values. + v := []float32{3.14, 6.28, 12.56} + vLen := fmt.Sprintf("%d", len(v)) + vCap := fmt.Sprintf("%d", cap(v)) + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" + + vt + ") 6.28,\n (" + vt + ") 12.56\n}" + addDumpTest(v, "([]"+vt+") "+vs+"\n") + addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*[]"+vt+")()\n") + + // Slice containing type with custom formatter on pointer receiver only. + v2i0 := pstringer("1") + v2i1 := pstringer("2") + v2i2 := pstringer("3") + v2 := []pstringer{v2i0, v2i1, v2i2} + v2i0Len := fmt.Sprintf("%d", len(v2i0)) + v2i1Len := fmt.Sprintf("%d", len(v2i1)) + v2i2Len := fmt.Sprintf("%d", len(v2i2)) + v2Len := fmt.Sprintf("%d", len(v2)) + v2Cap := fmt.Sprintf("%d", cap(v2)) + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.pstringer" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" + + v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len + + ") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " + + "stringer 3\n}" + addDumpTest(v2, "([]"+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*[]"+v2t+")()\n") + + // Slice containing interfaces. 
+ v3i0 := "one" + v3 := []interface{}{v3i0, int(2), uint(3), nil} + v3i0Len := fmt.Sprintf("%d", len(v3i0)) + v3Len := fmt.Sprintf("%d", len(v3)) + v3Cap := fmt.Sprintf("%d", cap(v3)) + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " + + "(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" + + v3t4 + ") 3,\n (" + v3t5 + ") \n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Slice containing bytes. + v4 := []byte{ + 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, + 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, + 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, + 0x31, 0x32, + } + v4Len := fmt.Sprintf("%d", len(v4)) + v4Cap := fmt.Sprintf("%d", cap(v4)) + nv4 := (*[]byte)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]uint8" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" + + " |............... |\n" + + " 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" + + " |!\"#$%&'()*+,-./0|\n" + + " 00000020 31 32 " + + " |12|\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") + + // Nil slice. + v5 := []int(nil) + nv5 := (*[]int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "[]int" + v5s := "" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n") + addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n") + addDumpTest(nv5, "(*"+v5t+")()\n") +} + +func addStringDumpTests() { + // Standard string. + v := "test" + vLen := fmt.Sprintf("%d", len(v)) + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "(len=" + vLen + ") \"test\"" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addInterfaceDumpTests() { + // Nil interface. + var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Sub-interface. + v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addMapDumpTests() { + // Map with string keys and int vals. 
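+	// (ms and ms2 below record both possible orderings, since Go map
+	// iteration order is not deterministic.)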
+ k := "one" + kk := "two" + m := map[string]int{k: 1, kk: 2} + klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up + kkLen := fmt.Sprintf("%d", len(kk)) + mLen := fmt.Sprintf("%d", len(m)) + nilMap := map[string]int(nil) + nm := (*map[string]int)(nil) + pm := &m + mAddr := fmt.Sprintf("%p", pm) + pmAddr := fmt.Sprintf("%p", &pm) + mt := "map[string]int" + mt1 := "string" + mt2 := "int" + ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " + + "\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen + + ") \"two\": (" + mt2 + ") 2\n}" + ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " + + "\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen + + ") \"one\": (" + mt2 + ") 1\n}" + addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n") + addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n", + "(*"+mt+")("+mAddr+")("+ms2+")\n") + addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n", + "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n") + addDumpTest(nm, "(*"+mt+")()\n") + addDumpTest(nilMap, "("+mt+") \n") + + // Map with custom formatter type on pointer receiver only keys and vals. + k2 := pstringer("one") + v2 := pstringer("1") + m2 := map[pstringer]pstringer{k2: v2} + k2Len := fmt.Sprintf("%d", len(k2)) + v2Len := fmt.Sprintf("%d", len(v2)) + m2Len := fmt.Sprintf("%d", len(m2)) + nilMap2 := map[pstringer]pstringer(nil) + nm2 := (*map[pstringer]pstringer)(nil) + pm2 := &m2 + m2Addr := fmt.Sprintf("%p", pm2) + pm2Addr := fmt.Sprintf("%p", &pm2) + m2t := "map[spew_test.pstringer]spew_test.pstringer" + m2t1 := "spew_test.pstringer" + m2t2 := "spew_test.pstringer" + m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " + + "stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}" + if spew.UnsafeDisabled { + m2s = "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + + ") " + "\"one\": (" + m2t2 + ") (len=" + v2Len + + ") \"1\"\n}" + } + addDumpTest(m2, "("+m2t+") "+m2s+"\n") + addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n") + addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n") + addDumpTest(nm2, "(*"+m2t+")()\n") + addDumpTest(nilMap2, "("+m2t+") \n") + + // Map with interface keys and values. + k3 := "one" + k3Len := fmt.Sprintf("%d", len(k3)) + m3 := map[interface{}]interface{}{k3: 1} + m3Len := fmt.Sprintf("%d", len(m3)) + nilMap3 := map[interface{}]interface{}(nil) + nm3 := (*map[interface{}]interface{})(nil) + pm3 := &m3 + m3Addr := fmt.Sprintf("%p", pm3) + pm3Addr := fmt.Sprintf("%p", &pm3) + m3t := "map[interface {}]interface {}" + m3t1 := "string" + m3t2 := "int" + m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " + + "\"one\": (" + m3t2 + ") 1\n}" + addDumpTest(m3, "("+m3t+") "+m3s+"\n") + addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n") + addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n") + addDumpTest(nm3, "(*"+m3t+")()\n") + addDumpTest(nilMap3, "("+m3t+") \n") + + // Map with nil interface value. 
+ k4 := "nil" + k4Len := fmt.Sprintf("%d", len(k4)) + m4 := map[string]interface{}{k4: nil} + m4Len := fmt.Sprintf("%d", len(m4)) + nilMap4 := map[string]interface{}(nil) + nm4 := (*map[string]interface{})(nil) + pm4 := &m4 + m4Addr := fmt.Sprintf("%p", pm4) + pm4Addr := fmt.Sprintf("%p", &pm4) + m4t := "map[string]interface {}" + m4t1 := "string" + m4t2 := "interface {}" + m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" + + " \"nil\": (" + m4t2 + ") \n}" + addDumpTest(m4, "("+m4t+") "+m4s+"\n") + addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n") + addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n") + addDumpTest(nm4, "(*"+m4t+")()\n") + addDumpTest(nilMap4, "("+m4t+") \n") +} + +func addStructDumpTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Struct that contains another struct. + type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" + + v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 + + ") (len=5) stringer test2\n}" + v3sp := v3s + if spew.UnsafeDisabled { + v3s = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) \"test2\"\n}" + v3sp = "{\n s: (" + v3t2 + ") (len=4) \"test\",\n S: (" + + v3t2 + ") (len=5) stringer test2\n}" + } + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3sp+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3sp+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + eLen := fmt.Sprintf("%d", len("embedstr")) + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 + + ") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 + + ")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" + + " \"embedstr\"\n })\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n") + addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n") + addDumpTest(nv4, "(*"+v4t+")()\n") +} + +func addUintptrDumpTests() { + // Null pointer. + v := uintptr(0) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + nv2 := (*uintptr)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") +} + +func addUnsafePointerDumpTests() { + // Null pointer. + v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addChanDumpTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") +} + +func addFuncDumpTests() { + // Function with no params and no returns. 
+ v := addIntDumpTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") + + // Function with param and no returns. + v2 := TestDump + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n") + addDumpTest(nv2, "(*"+v2t+")()\n") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n") + addDumpTest(nv3, "(*"+v3t+")()\n") +} + +func addCircularDumpTests() { + // Struct that is circular through self referencing. + type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" + + vAddr + ")()\n })\n}" + vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n") + + // Structs that are circular through cross referencing. + v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr + + ")()\n })\n })\n}" + v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t + + ")(" + v2Addr + ")()\n })\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n") + addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n") + + // Structs that are indirectly circular. 
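+	// (spew stops expanding a pointer it has already displayed, so these
+	// cyclic structures still produce finite want strings.)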
+ v3 := indirCir1{nil} + tic2 := indirCir2{nil} + tic3 := indirCir3{&v3} + tic2.ps3 = &tic3 + v3.ps2 = &tic2 + pv3 := &v3 + tic2Addr := fmt.Sprintf("%p", &tic2) + tic3Addr := fmt.Sprintf("%p", &tic3) + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.indirCir1" + v3t2 := "spew_test.indirCir2" + v3t3 := "spew_test.indirCir3" + v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")({\n ps2: (*" + v3t2 + ")(" + tic2Addr + + ")()\n })\n })\n })\n}" + v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 + + ")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr + + ")()\n })\n })\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n") + addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n") + addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n") +} + +func addPanicDumpTests() { + // Type that panics in its Stringer interface. + v := panicer(127) + nv := (*panicer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.panicer" + vs := "(PANIC=test panic)127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +func addErrorDumpTests() { + // Type that has a custom Error interface. + v := customError(127) + nv := (*customError)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.customError" + vs := "error: 127" + addDumpTest(v, "("+vt+") "+vs+"\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n") + addDumpTest(nv, "(*"+vt+")()\n") +} + +// TestDump executes all of the tests described by dumpTests. +func TestDump(t *testing.T) { + // Setup tests. 
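+	// (Each helper below appends entries to the package-level dumpTests
+	// slice; the loop that follows Fdumps every input and accepts a match
+	// against any of that entry's recorded want strings.)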
+ addIntDumpTests() + addUintDumpTests() + addBoolDumpTests() + addFloatDumpTests() + addComplexDumpTests() + addArrayDumpTests() + addSliceDumpTests() + addStringDumpTests() + addInterfaceDumpTests() + addMapDumpTests() + addStructDumpTests() + addUintptrDumpTests() + addUnsafePointerDumpTests() + addChanDumpTests() + addFuncDumpTests() + addCircularDumpTests() + addPanicDumpTests() + addErrorDumpTests() + addCgoDumpTests() + + t.Logf("Running %d tests", len(dumpTests)) + for i, test := range dumpTests { + buf := new(bytes.Buffer) + spew.Fdump(buf, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants)) + continue + } + } +} + +func TestDumpSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "(map[int]string) (len=3) {\n(int) 1: (string) (len=1) " + + "\"1\",\n(int) 2: (string) (len=1) \"2\",\n(int) 3: (string) " + + "(len=1) \"3\"\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "(map[spew_test.stringer]int) (len=3) {\n" + + "(spew_test.stringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.stringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.stringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) stringer 1: (int) 1,\n" + + "(spew_test.pstringer) (len=1) stringer 2: (int) 2,\n" + + "(spew_test.pstringer) (len=1) stringer 3: (int) 3\n" + + "}\n" + if spew.UnsafeDisabled { + expected = "(map[spew_test.pstringer]int) (len=3) {\n" + + "(spew_test.pstringer) (len=1) \"1\": (int) 1,\n" + + "(spew_test.pstringer) (len=1) \"2\": (int) 2,\n" + + "(spew_test.pstringer) (len=1) \"3\": (int) 3\n" + + "}\n" + } + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + + s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "(map[spew_test.customError]int) (len=3) {\n" + + "(spew_test.customError) error: 1: (int) 1,\n" + + "(spew_test.customError) error: 2: (int) 2,\n" + + "(spew_test.customError) error: 3: (int) 3\n" + + "}\n" + if s != expected { + t.Errorf("Sorted keys mismatch:\n %v %v", s, expected) + } + +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go new file mode 100644 index 0000000000..6ab180809a --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go @@ -0,0 +1,99 @@ +// Copyright (c) 2013-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when both cgo is supported and "-tags testcgo" is added to the go test +// command line. This means the cgo tests are only added (and hence run) when +// specifially requested. This configuration is used because spew itself +// does not require cgo to run even though it does handle certain cgo types +// specially. Rather than forcing all clients to require cgo and an external +// C compiler just to run the tests, this scheme makes them optional. +// +build cgo,testcgo + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew/testdata" +) + +func addCgoDumpTests() { + // C char pointer. + v := testdata.GetCgoCharPointer() + nv := testdata.GetCgoNullCharPointer() + pv := &v + vcAddr := fmt.Sprintf("%p", v) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "*testdata._Ctype_char" + vs := "116" + addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n") + addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n") + addDumpTest(nv, "("+vt+")()\n") + + // C char array. + v2, v2l, v2c := testdata.GetCgoCharArray() + v2Len := fmt.Sprintf("%d", v2l) + v2Cap := fmt.Sprintf("%d", v2c) + v2t := "[6]testdata._Ctype_char" + v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " + + "{\n 00000000 74 65 73 74 32 00 " + + " |test2.|\n}" + addDumpTest(v2, "("+v2t+") "+v2s+"\n") + + // C unsigned char array. + v3, v3l, v3c := testdata.GetCgoUnsignedCharArray() + v3Len := fmt.Sprintf("%d", v3l) + v3Cap := fmt.Sprintf("%d", v3c) + v3t := "[6]testdata._Ctype_unsignedchar" + v3t2 := "[6]testdata._Ctype_uchar" + v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " + + "{\n 00000000 74 65 73 74 33 00 " + + " |test3.|\n}" + addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n") + + // C signed char array. + v4, v4l, v4c := testdata.GetCgoSignedCharArray() + v4Len := fmt.Sprintf("%d", v4l) + v4Cap := fmt.Sprintf("%d", v4c) + v4t := "[6]testdata._Ctype_schar" + v4t2 := "testdata._Ctype_schar" + v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " + + "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 + + ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 + + ") 0\n}" + addDumpTest(v4, "("+v4t+") "+v4s+"\n") + + // C uint8_t array. + v5, v5l, v5c := testdata.GetCgoUint8tArray() + v5Len := fmt.Sprintf("%d", v5l) + v5Cap := fmt.Sprintf("%d", v5c) + v5t := "[6]testdata._Ctype_uint8_t" + v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " + + "{\n 00000000 74 65 73 74 35 00 " + + " |test5.|\n}" + addDumpTest(v5, "("+v5t+") "+v5s+"\n") + + // C typedefed unsigned char array. 
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray() + v6Len := fmt.Sprintf("%d", v6l) + v6Cap := fmt.Sprintf("%d", v6c) + v6t := "[6]testdata._Ctype_custom_uchar_t" + v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " + + "{\n 00000000 74 65 73 74 36 00 " + + " |test6.|\n}" + addDumpTest(v6, "("+v6t+") "+v6s+"\n") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go new file mode 100644 index 0000000000..52a0971fb3 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2013 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when either cgo is not supported or "-tags testcgo" is not added to the go +// test command line. This file intentionally does not setup any cgo tests in +// this scenario. +// +build !cgo !testcgo + +package spew_test + +func addCgoDumpTests() { + // Don't add any tests for cgo since this file is only compiled when + // there should not be any cgo tests. +} diff --git a/vendor/github.com/davecgh/go-spew/spew/example_test.go b/vendor/github.com/davecgh/go-spew/spew/example_test.go new file mode 100644 index 0000000000..c6ec8c6d59 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/example_test.go @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew_test + +import ( + "fmt" + + "github.com/davecgh/go-spew/spew" +) + +type Flag int + +const ( + flagOne Flag = iota + flagTwo +) + +var flagStrings = map[Flag]string{ + flagOne: "flagOne", + flagTwo: "flagTwo", +} + +func (f Flag) String() string { + if s, ok := flagStrings[f]; ok { + return s + } + return fmt.Sprintf("Unknown flag (%d)", int(f)) +} + +type Bar struct { + data uintptr +} + +type Foo struct { + unexportedField Bar + ExportedField map[interface{}]interface{} +} + +// This example demonstrates how to use Dump to dump variables to stdout. 
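The Example functions that follow serve double duty: `go test` executes each one and compares its standard output against the trailing `// Output:` comment, so the renderings below are verified on every test run. To execute only the examples, `go test -run Example github.com/davecgh/go-spew/spew` narrows the run accordingly.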
+func ExampleDump() {
+	// The following package level declarations are assumed for this example:
+	/*
+		type Flag int
+
+		const (
+			flagOne Flag = iota
+			flagTwo
+		)
+
+		var flagStrings = map[Flag]string{
+			flagOne: "flagOne",
+			flagTwo: "flagTwo",
+		}
+
+		func (f Flag) String() string {
+			if s, ok := flagStrings[f]; ok {
+				return s
+			}
+			return fmt.Sprintf("Unknown flag (%d)", int(f))
+		}
+
+		type Bar struct {
+			data uintptr
+		}
+
+		type Foo struct {
+			unexportedField Bar
+			ExportedField map[interface{}]interface{}
+		}
+	*/
+
+	// Setup some sample data structures for the example.
+	bar := Bar{uintptr(0)}
+	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+	f := Flag(5)
+	b := []byte{
+		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
+		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
+		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
+		0x31, 0x32,
+	}
+
+	// Dump!
+	spew.Dump(s1, f, b)
+
+	// Output:
+	// (spew_test.Foo) {
+	// unexportedField: (spew_test.Bar) {
+	// data: (uintptr) <nil>
+	// },
+	// ExportedField: (map[interface {}]interface {}) (len=1) {
+	// (string) (len=3) "one": (bool) true
+	// }
+	// }
+	// (spew_test.Flag) Unknown flag (5)
+	// ([]uint8) (len=34 cap=34) {
+	// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+	// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+	// 00000020 31 32 |12|
+	// }
+	//
+}
+
+// This example demonstrates how to use Printf to display a variable with a
+// format string and inline formatting.
+func ExamplePrintf() {
+	// Create a double pointer to a uint8.
+	ui8 := uint8(5)
+	pui8 := &ui8
+	ppui8 := &pui8
+
+	// Create a circular data type.
+	type circular struct {
+		ui8 uint8
+		c   *circular
+	}
+	c := circular{ui8: 1}
+	c.c = &c
+
+	// Print!
+	spew.Printf("ppui8: %v\n", ppui8)
+	spew.Printf("circular: %v\n", c)
+
+	// Output:
+	// ppui8: <**>5
+	// circular: {1 <*>{1 <*><shown>}}
+}
+
+// This example demonstrates how to use a ConfigState.
+func ExampleConfigState() {
+	// Modify the indent level of the ConfigState only. The global
+	// configuration is not modified.
+	scs := spew.ConfigState{Indent: "\t"}
+
+	// Output using the ConfigState instance.
+	v := map[string]int{"one": 1}
+	scs.Printf("v: %v\n", v)
+	scs.Dump(v)
+
+	// Output:
+	// v: map[one:1]
+	// (map[string]int) (len=1) {
+	// (string) (len=3) "one": (int) 1
+	// }
+}
+
+// This example demonstrates how to use ConfigState.Dump to dump variables to
+// stdout.
+func ExampleConfigState_Dump() {
+	// See the top-level Dump example for details on the types used in this
+	// example.
+
+	// Create two ConfigState instances with different indentation.
+	scs := spew.ConfigState{Indent: "\t"}
+	scs2 := spew.ConfigState{Indent: " "}
+
+	// Setup some sample data structures for the example.
+	bar := Bar{uintptr(0)}
+	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
+
+	// Dump using the ConfigState instances.
+	scs.Dump(s1)
+	scs2.Dump(s1)
+
+	// Output:
+	// (spew_test.Foo) {
+	// unexportedField: (spew_test.Bar) {
+	// data: (uintptr) <nil>
+	// },
+	// ExportedField: (map[interface {}]interface {}) (len=1) {
+	// (string) (len=3) "one": (bool) true
+	// }
+	// }
+	// (spew_test.Foo) {
+	// unexportedField: (spew_test.Bar) {
+	// data: (uintptr) <nil>
+	// },
+	// ExportedField: (map[interface {}]interface {}) (len=1) {
+	// (string) (len=3) "one": (bool) true
+	// }
+	// }
+	//
+}
+
+// This example demonstrates how to use ConfigState.Printf to display a variable
+// with a format string and inline formatting.
+func ExampleConfigState_Printf() {
+	// See the top-level Dump example for details on the types used in this
+	// example.
+
+	// Create two ConfigState instances and modify the method handling of the
+	// first ConfigState only.
+	scs := spew.NewDefaultConfig()
+	scs2 := spew.NewDefaultConfig()
+	scs.DisableMethods = true
+
+	// Alternatively
+	// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
+	// scs2 := spew.ConfigState{Indent: " "}
+
+	// This is of type Flag which implements a Stringer and has raw value 1.
+	f := flagTwo
+
+	// Dump using the ConfigState instances.
+	scs.Printf("f: %v\n", f)
+	scs2.Printf("f: %v\n", f)
+
+	// Output:
+	// f: 1
+	// f: flagTwo
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format_test.go b/vendor/github.com/davecgh/go-spew/spew/format_test.go
new file mode 100644
index 0000000000..f9b93abe86
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format_test.go
@@ -0,0 +1,1558 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Test Summary:
+NOTE: For each test, a nil pointer, a single pointer and double pointer to the
+base test element are also tested to ensure proper indirection across all
+types, following the pattern sketched below.
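As a concrete sketch of that indirection pattern (names are illustrative; they mirror the variables used throughout the cases below):

	v := int8(127)     // base test element
	pv := &v           // single pointer to it
	ppv := &pv         // double pointer to it
	nv := (*int8)(nil) // typed nil pointer
	// Every case asserts the expected rendering for all four forms.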
+ +- Max int8, int16, int32, int64, int +- Max uint8, uint16, uint32, uint64, uint +- Boolean true and false +- Standard complex64 and complex128 +- Array containing standard ints +- Array containing type with custom formatter on pointer receiver only +- Array containing interfaces +- Slice containing standard float32 values +- Slice containing type with custom formatter on pointer receiver only +- Slice containing interfaces +- Nil slice +- Standard string +- Nil interface +- Sub-interface +- Map with string keys and int vals +- Map with custom formatter type on pointer receiver only keys and vals +- Map with interface keys and values +- Map with nil interface value +- Struct with primitives +- Struct that contains another struct +- Struct that contains custom type with Stringer pointer interface via both + exported and unexported fields +- Struct that contains embedded struct and field to same struct +- Uintptr to 0 (null pointer) +- Uintptr address of real variable +- Unsafe.Pointer to 0 (null pointer) +- Unsafe.Pointer to address of real variable +- Nil channel +- Standard int channel +- Function with no params and no returns +- Function with param and no returns +- Function with multiple params and multiple returns +- Struct that is circular through self referencing +- Structs that are circular through cross referencing +- Structs that are indirectly circular +- Type that panics in its Stringer interface +- Type that has a custom Error interface +- %x passthrough with uint +- %#x passthrough with uint +- %f passthrough with precision +- %f passthrough with width and precision +- %d passthrough with width +- %q passthrough with string +*/ + +package spew_test + +import ( + "bytes" + "fmt" + "testing" + "unsafe" + + "github.com/davecgh/go-spew/spew" +) + +// formatterTest is used to describe a test to be performed against NewFormatter. +type formatterTest struct { + format string + in interface{} + wants []string +} + +// formatterTests houses all of the tests to be performed against NewFormatter. +var formatterTests = make([]formatterTest, 0) + +// addFormatterTest is a helper method to append the passed input and desired +// result to formatterTests. +func addFormatterTest(format string, in interface{}, wants ...string) { + test := formatterTest{format, in, wants} + formatterTests = append(formatterTests, test) +} + +func addIntFormatterTests() { + // Max int8. + v := int8(127) + nv := (*int8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "int8" + vs := "127" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max int16. 
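For orientation while reading these tables: every case exercises the four formatter verb forms against the same value. Taking the int8 case above (pv points at int8(127); 0x... stands for a run-dependent address), the expected renderings are:

	spew.Printf("%v\n", pv)   // <*>127            value only
	spew.Printf("%+v\n", pv)  // <*>(0x...)127     plus pointer addresses
	spew.Printf("%#v\n", pv)  // (*int8)127        plus types
	spew.Printf("%#+v\n", pv) // (*int8)(0x...)127 plus types and addresses

The max-int16 case introduced above follows the same shape, as do the rest.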
+ v2 := int16(32767) + nv2 := (*int16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "int16" + v2s := "32767" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max int32. + v3 := int32(2147483647) + nv3 := (*int32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "int32" + v3s := "2147483647" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max int64. + v4 := int64(9223372036854775807) + nv4 := (*int64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "int64" + v4s := "9223372036854775807" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max int. 
+ v5 := int(2147483647) + nv5 := (*int)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "int" + v5s := "2147483647" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"") +} + +func addUintFormatterTests() { + // Max uint8. + v := uint8(255) + nv := (*uint8)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uint8" + vs := "255" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Max uint16. + v2 := uint16(65535) + nv2 := (*uint16)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Max uint32. 
+ v3 := uint32(4294967295) + nv3 := (*uint32)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "uint32" + v3s := "4294967295" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + + // Max uint64. + v4 := uint64(18446744073709551615) + nv4 := (*uint64)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "uint64" + v4s := "18446744073709551615" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") + + // Max uint. + v5 := uint(4294967295) + nv5 := (*uint)(nil) + pv5 := &v5 + v5Addr := fmt.Sprintf("%p", pv5) + pv5Addr := fmt.Sprintf("%p", &pv5) + v5t := "uint" + v5s := "4294967295" + addFormatterTest("%v", v5, v5s) + addFormatterTest("%v", pv5, "<*>"+v5s) + addFormatterTest("%v", &pv5, "<**>"+v5s) + addFormatterTest("%v", nv5, "") + addFormatterTest("%+v", v5, v5s) + addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s) + addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%+v", nv5, "") + addFormatterTest("%#v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s) + addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") + addFormatterTest("%#+v", v5, "("+v5t+")"+v5s) + addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s) + addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s) + addFormatterTest("%#v", nv5, "(*"+v5t+")"+"") +} + +func addBoolFormatterTests() { + // Boolean true. 
+ v := bool(true) + nv := (*bool)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "bool" + vs := "true" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Boolean false. + v2 := bool(false) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "bool" + v2s := "false" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFloatFormatterTests() { + // Standard float32. + v := float32(3.1415) + nv := (*float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "float32" + vs := "3.1415" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard float64. 
+ v2 := float64(3.1415926) + nv2 := (*float64)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "float64" + v2s := "3.1415926" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addComplexFormatterTests() { + // Standard complex64. + v := complex(float32(6), -2) + nv := (*complex64)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "complex64" + vs := "(6-2i)" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Standard complex128. + v2 := complex(float64(-6), 2) + nv2 := (*complex128)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "complex128" + v2s := "(-6+2i)" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") +} + +func addArrayFormatterTests() { + // Array containing standard ints. 
+ v := [3]int{1, 2, 3} + nv := (*[3]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[3]int" + vs := "[1 2 3]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Array containing type with custom formatter on pointer receiver only. + v2 := [3]pstringer{"1", "2", "3"} + nv2 := (*[3]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[3]spew_test.pstringer" + v2sp := "[stringer 1 stringer 2 stringer 3]" + v2s := v2sp + if spew.UnsafeDisabled { + v2s = "[1 2 3]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2sp) + addFormatterTest("%v", &pv2, "<**>"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2sp) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2sp) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2sp) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2sp) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2sp) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Array containing interfaces. + v3 := [3]interface{}{"one", int(2), uint(3)} + nv3 := (*[3]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[3]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3s := "[one 2 3]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addSliceFormatterTests() { + // Slice containing standard float32 values. 
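A subtlety from the pstringer array case above that recurs below: pstringer implements Stringer on its pointer receiver only, so for a plain, unaddressable value spew can only invoke the method via the unsafe package. On builds where that access is unavailable, the exported spew.UnsafeDisabled constant is true and the expectations fall back to the raw values, which is what the UnsafeDisabled branches encode:

	v := [3]pstringer{"1", "2", "3"}
	spew.Printf("%v\n", v)  // [stringer 1 stringer 2 stringer 3], but
	                        // [1 2 3] when spew.UnsafeDisabled is true
	spew.Printf("%v\n", &v) // [stringer 1 stringer 2 stringer 3] either way,
	                        // since values reached via &v are addressable

The same branch logic reappears in the map and struct cases further on.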
+ v := []float32{3.14, 6.28, 12.56} + nv := (*[]float32)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "[]float32" + vs := "[3.14 6.28 12.56]" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Slice containing type with custom formatter on pointer receiver only. + v2 := []pstringer{"1", "2", "3"} + nv2 := (*[]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "[]spew_test.pstringer" + v2s := "[stringer 1 stringer 2 stringer 3]" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Slice containing interfaces. + v3 := []interface{}{"one", int(2), uint(3), nil} + nv3 := (*[]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "[]interface {}" + v3t2 := "string" + v3t3 := "int" + v3t4 := "uint" + v3t5 := "interface {}" + v3s := "[one 2 3 ]" + v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 + + ")]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Nil slice. 
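The nil cases that start here pin down spew's nil rendering: nil values print as <nil> under %v, and typed nil pointers pick up their type under %#v. For instance, matching the nil-slice expectations below:

	var s []int
	spew.Printf("%v\n", s)              // <nil>
	spew.Printf("%#v\n", (*[]int)(nil)) // (*[]int)<nil>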
+ var v4 []int + nv4 := (*[]int)(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "[]int" + v4s := "" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStringFormatterTests() { + // Standard string. + v := "test" + nv := (*string)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "string" + vs := "test" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") +} + +func addInterfaceFormatterTests() { + // Nil interface. + var v interface{} + nv := (*interface{})(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "interface {}" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Sub-interface. 
+ v2 := interface{}(uint16(65535)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uint16" + v2s := "65535" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addMapFormatterTests() { + // Map with string keys and int vals. + v := map[string]int{"one": 1, "two": 2} + nilMap := map[string]int(nil) + nv := (*map[string]int)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "map[string]int" + vs := "map[one:1 two:2]" + vs2 := "map[two:2 one:1]" + addFormatterTest("%v", v, vs, vs2) + addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs, + "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nilMap, "") + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2) + addFormatterTest("%#v", nilMap, "("+vt+")"+"") + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs, + "(*"+vt+")("+vAddr+")"+vs2) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs, + "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%#+v", nilMap, "("+vt+")"+"") + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Map with custom formatter type on pointer receiver only keys and vals. + v2 := map[pstringer]pstringer{"one": "1"} + nv2 := (*map[pstringer]pstringer)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "map[spew_test.pstringer]spew_test.pstringer" + v2s := "map[stringer one:stringer 1]" + if spew.UnsafeDisabled { + v2s = "map[one:1]" + } + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Map with interface keys and values. 
+ v3 := map[interface{}]interface{}{"one": 1} + nv3 := (*map[interface{}]interface{})(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "map[interface {}]interface {}" + v3t1 := "string" + v3t2 := "int" + v3s := "map[one:1]" + v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]" + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Map with nil interface value + v4 := map[string]interface{}{"nil": nil} + nv4 := (*map[string]interface{})(nil) + pv4 := &v4 + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "map[string]interface {}" + v4t1 := "interface {}" + v4s := "map[nil:]" + v4s2 := "map[nil:(" + v4t1 + ")]" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addStructFormatterTests() { + // Struct with primitives. + type s1 struct { + a int8 + b uint8 + } + v := s1{127, 255} + nv := (*s1)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.s1" + vt2 := "int8" + vt3 := "uint8" + vs := "{127 255}" + vs2 := "{a:127 b:255}" + vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs2) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs3) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs3) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs3) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Struct that contains another struct. 
+ type s2 struct { + s1 s1 + b bool + } + v2 := s2{s1{127, 255}, true} + nv2 := (*s2)(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.s2" + v2t2 := "spew_test.s1" + v2t3 := "int8" + v2t4 := "uint8" + v2t5 := "bool" + v2s := "{{127 255} true}" + v2s2 := "{s1:{a:127 b:255} b:true}" + v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" + + v2t5 + ")true}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s2) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Struct that contains custom type with Stringer pointer interface via both + // exported and unexported fields. + type s3 struct { + s pstringer + S pstringer + } + v3 := s3{"test", "test2"} + nv3 := (*s3)(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "spew_test.s3" + v3t2 := "spew_test.pstringer" + v3s := "{stringer test stringer test2}" + v3sp := v3s + v3s2 := "{s:stringer test S:stringer test2}" + v3s2p := v3s2 + v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}" + v3s3p := v3s3 + if spew.UnsafeDisabled { + v3s = "{test test2}" + v3sp = "{test stringer test2}" + v3s2 = "{s:test S:test2}" + v3s2p = "{s:test S:stringer test2}" + v3s3 = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")test2}" + v3s3p = "{s:(" + v3t2 + ")test S:(" + v3t2 + ")stringer test2}" + } + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3sp) + addFormatterTest("%v", &pv3, "<**>"+v3sp) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s2) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s2p) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s2p) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s3p) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s3p) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s3) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s3p) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") + + // Struct that contains embedded struct and field to same struct. 
+ e := embed{"embedstr"} + v4 := embedwrap{embed: &e, e: &e} + nv4 := (*embedwrap)(nil) + pv4 := &v4 + eAddr := fmt.Sprintf("%p", &e) + v4Addr := fmt.Sprintf("%p", pv4) + pv4Addr := fmt.Sprintf("%p", &pv4) + v4t := "spew_test.embedwrap" + v4t2 := "spew_test.embed" + v4t3 := "string" + v4s := "{<*>{embedstr} <*>{embedstr}}" + v4s2 := "{embed:<*>(" + eAddr + "){a:embedstr} e:<*>(" + eAddr + + "){a:embedstr}}" + v4s3 := "{embed:(*" + v4t2 + "){a:(" + v4t3 + ")embedstr} e:(*" + v4t2 + + "){a:(" + v4t3 + ")embedstr}}" + v4s4 := "{embed:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + + ")embedstr} e:(*" + v4t2 + ")(" + eAddr + "){a:(" + v4t3 + ")embedstr}}" + addFormatterTest("%v", v4, v4s) + addFormatterTest("%v", pv4, "<*>"+v4s) + addFormatterTest("%v", &pv4, "<**>"+v4s) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%+v", v4, v4s2) + addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s2) + addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s2) + addFormatterTest("%+v", nv4, "") + addFormatterTest("%#v", v4, "("+v4t+")"+v4s3) + addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s3) + addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s3) + addFormatterTest("%#v", nv4, "(*"+v4t+")"+"") + addFormatterTest("%#+v", v4, "("+v4t+")"+v4s4) + addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s4) + addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s4) + addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"") +} + +func addUintptrFormatterTests() { + // Null pointer. + v := uintptr(0) + nv := (*uintptr)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "uintptr" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := uintptr(unsafe.Pointer(&i)) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "uintptr" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addUnsafePointerFormatterTests() { + // Null pointer. 
+ v := unsafe.Pointer(uintptr(0)) + nv := (*unsafe.Pointer)(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "unsafe.Pointer" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Address of real variable. + i := 1 + v2 := unsafe.Pointer(&i) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "unsafe.Pointer" + v2s := fmt.Sprintf("%p", &i) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addChanFormatterTests() { + // Nil channel. + var v chan int + pv := &v + nv := (*chan int)(nil) + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "chan int" + vs := "" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Real channel. + v2 := make(chan int) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "chan int" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) +} + +func addFuncFormatterTests() { + // Function with no params and no returns. 
+ v := addIntFormatterTests + nv := (*func())(nil) + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "func()" + vs := fmt.Sprintf("%p", v) + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs) + addFormatterTest("%v", &pv, "<**>"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%+v", v, vs) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%+v", nv, "") + addFormatterTest("%#v", v, "("+vt+")"+vs) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs) + addFormatterTest("%#v", nv, "(*"+vt+")"+"") + addFormatterTest("%#+v", v, "("+vt+")"+vs) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs) + addFormatterTest("%#+v", nv, "(*"+vt+")"+"") + + // Function with param and no returns. + v2 := TestFormatter + nv2 := (*func(*testing.T))(nil) + pv2 := &v2 + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "func(*testing.T)" + v2s := fmt.Sprintf("%p", v2) + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s) + addFormatterTest("%v", &pv2, "<**>"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%+v", v2, v2s) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%+v", nv2, "") + addFormatterTest("%#v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s) + addFormatterTest("%#v", nv2, "(*"+v2t+")"+"") + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s) + addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"") + + // Function with multiple params and multiple returns. + var v3 = func(i int, s string) (b bool, err error) { + return true, nil + } + nv3 := (*func(int, string) (bool, error))(nil) + pv3 := &v3 + v3Addr := fmt.Sprintf("%p", pv3) + pv3Addr := fmt.Sprintf("%p", &pv3) + v3t := "func(int, string) (bool, error)" + v3s := fmt.Sprintf("%p", v3) + addFormatterTest("%v", v3, v3s) + addFormatterTest("%v", pv3, "<*>"+v3s) + addFormatterTest("%v", &pv3, "<**>"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%+v", v3, v3s) + addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s) + addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%+v", nv3, "") + addFormatterTest("%#v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s) + addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s) + addFormatterTest("%#v", nv3, "(*"+v3t+")"+"") + addFormatterTest("%#+v", v3, "("+v3t+")"+v3s) + addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s) + addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s) + addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"") +} + +func addCircularFormatterTests() { + // Struct that is circular through self referencing. 
+ type circular struct { + c *circular + } + v := circular{nil} + v.c = &v + pv := &v + vAddr := fmt.Sprintf("%p", pv) + pvAddr := fmt.Sprintf("%p", &pv) + vt := "spew_test.circular" + vs := "{<*>{<*>}}" + vs2 := "{<*>}" + vs3 := "{c:<*>(" + vAddr + "){c:<*>(" + vAddr + ")}}" + vs4 := "{c:<*>(" + vAddr + ")}" + vs5 := "{c:(*" + vt + "){c:(*" + vt + ")}}" + vs6 := "{c:(*" + vt + ")}" + vs7 := "{c:(*" + vt + ")(" + vAddr + "){c:(*" + vt + ")(" + vAddr + + ")}}" + vs8 := "{c:(*" + vt + ")(" + vAddr + ")}" + addFormatterTest("%v", v, vs) + addFormatterTest("%v", pv, "<*>"+vs2) + addFormatterTest("%v", &pv, "<**>"+vs2) + addFormatterTest("%+v", v, vs3) + addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs4) + addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs4) + addFormatterTest("%#v", v, "("+vt+")"+vs5) + addFormatterTest("%#v", pv, "(*"+vt+")"+vs6) + addFormatterTest("%#v", &pv, "(**"+vt+")"+vs6) + addFormatterTest("%#+v", v, "("+vt+")"+vs7) + addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs8) + addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs8) + + // Structs that are circular through cross referencing. + v2 := xref1{nil} + ts2 := xref2{&v2} + v2.ps2 = &ts2 + pv2 := &v2 + ts2Addr := fmt.Sprintf("%p", &ts2) + v2Addr := fmt.Sprintf("%p", pv2) + pv2Addr := fmt.Sprintf("%p", &pv2) + v2t := "spew_test.xref1" + v2t2 := "spew_test.xref2" + v2s := "{<*>{<*>{<*>}}}" + v2s2 := "{<*>{<*>}}" + v2s3 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + "){ps2:<*>(" + + ts2Addr + ")}}}" + v2s4 := "{ps2:<*>(" + ts2Addr + "){ps1:<*>(" + v2Addr + ")}}" + v2s5 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + "){ps2:(*" + v2t2 + + ")}}}" + v2s6 := "{ps2:(*" + v2t2 + "){ps1:(*" + v2t + ")}}" + v2s7 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + "){ps2:(*" + v2t2 + ")(" + ts2Addr + + ")}}}" + v2s8 := "{ps2:(*" + v2t2 + ")(" + ts2Addr + "){ps1:(*" + v2t + + ")(" + v2Addr + ")}}" + addFormatterTest("%v", v2, v2s) + addFormatterTest("%v", pv2, "<*>"+v2s2) + addFormatterTest("%v", &pv2, "<**>"+v2s2) + addFormatterTest("%+v", v2, v2s3) + addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s4) + addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s4) + addFormatterTest("%#v", v2, "("+v2t+")"+v2s5) + addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s6) + addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s6) + addFormatterTest("%#+v", v2, "("+v2t+")"+v2s7) + addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s8) + addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s8) + + // Structs that are indirectly circular. 
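All three circular shapes in this function rely on the same guard: once spew has displayed a pointer at an outer level of the current value, it does not recurse into it again, and upstream marks the repeat with a <shown> tag in the output. A compact sketch of the self-referencing case:

	type circular struct{ c *circular }
	v := circular{}
	v.c = &v
	spew.Printf("%v\n", v) // {<*>{<*><shown>}}: recursion stops at the repeat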
+	v3 := indirCir1{nil}
+	tic2 := indirCir2{nil}
+	tic3 := indirCir3{&v3}
+	tic2.ps3 = &tic3
+	v3.ps2 = &tic2
+	pv3 := &v3
+	tic2Addr := fmt.Sprintf("%p", &tic2)
+	tic3Addr := fmt.Sprintf("%p", &tic3)
+	v3Addr := fmt.Sprintf("%p", pv3)
+	pv3Addr := fmt.Sprintf("%p", &pv3)
+	v3t := "spew_test.indirCir1"
+	v3t2 := "spew_test.indirCir2"
+	v3t3 := "spew_test.indirCir3"
+	v3s := "{<*>{<*>{<*>{<*>}}}}"
+	v3s2 := "{<*>{<*>{<*>}}}"
+	v3s3 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
+		v3Addr + "){ps2:<*>(" + tic2Addr + ")}}}}"
+	v3s4 := "{ps2:<*>(" + tic2Addr + "){ps3:<*>(" + tic3Addr + "){ps1:<*>(" +
+		v3Addr + ")}}}"
+	v3s5 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
+		"){ps2:(*" + v3t2 + ")}}}}"
+	v3s6 := "{ps2:(*" + v3t2 + "){ps3:(*" + v3t3 + "){ps1:(*" + v3t +
+		")}}}"
+	v3s7 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
+		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + "){ps2:(*" + v3t2 +
+		")(" + tic2Addr + ")}}}}"
+	v3s8 := "{ps2:(*" + v3t2 + ")(" + tic2Addr + "){ps3:(*" + v3t3 + ")(" +
+		tic3Addr + "){ps1:(*" + v3t + ")(" + v3Addr + ")}}}"
+	addFormatterTest("%v", v3, v3s)
+	addFormatterTest("%v", pv3, "<*>"+v3s2)
+	addFormatterTest("%v", &pv3, "<**>"+v3s2)
+	addFormatterTest("%+v", v3, v3s3)
+	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s4)
+	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s4)
+	addFormatterTest("%#v", v3, "("+v3t+")"+v3s5)
+	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s6)
+	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s6)
+	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s7)
+	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s8)
+	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s8)
+}
+
+func addPanicFormatterTests() {
+	// Type that panics in its Stringer interface.
+	v := panicer(127)
+	nv := (*panicer)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.panicer"
+	vs := "(PANIC=test panic)127"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addErrorFormatterTests() {
+	// Type that has a custom Error interface.
+	v := customError(127)
+	nv := (*customError)(nil)
+	pv := &v
+	vAddr := fmt.Sprintf("%p", pv)
+	pvAddr := fmt.Sprintf("%p", &pv)
+	vt := "spew_test.customError"
+	vs := "error: 127"
+	addFormatterTest("%v", v, vs)
+	addFormatterTest("%v", pv, "<*>"+vs)
+	addFormatterTest("%v", &pv, "<**>"+vs)
+	addFormatterTest("%v", nv, "<nil>")
+	addFormatterTest("%+v", v, vs)
+	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
+	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%+v", nv, "<nil>")
+	addFormatterTest("%#v", v, "("+vt+")"+vs)
+	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
+	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
+	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
+	addFormatterTest("%#+v", v, "("+vt+")"+vs)
+	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
+	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
+	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
+}
+
+func addPassthroughFormatterTests() {
+	// %x passthrough with uint.
+	v := uint(4294967295)
+	pv := &v
+	vAddr := fmt.Sprintf("%x", pv)
+	pvAddr := fmt.Sprintf("%x", &pv)
+	vs := "ffffffff"
+	addFormatterTest("%x", v, vs)
+	addFormatterTest("%x", pv, vAddr)
+	addFormatterTest("%x", &pv, pvAddr)
+
+	// %#x passthrough with int.
+	v2 := int(2147483647)
+	pv2 := &v2
+	v2Addr := fmt.Sprintf("%#x", pv2)
+	pv2Addr := fmt.Sprintf("%#x", &pv2)
+	v2s := "0x7fffffff"
+	addFormatterTest("%#x", v2, v2s)
+	addFormatterTest("%#x", pv2, v2Addr)
+	addFormatterTest("%#x", &pv2, pv2Addr)
+
+	// %f passthrough with precision.
+	addFormatterTest("%.2f", 3.1415, "3.14")
+	addFormatterTest("%.3f", 3.1415, "3.142")
+	addFormatterTest("%.4f", 3.1415, "3.1415")
+
+	// %f passthrough with width and precision.
+	addFormatterTest("%5.2f", 3.1415, " 3.14")
+	addFormatterTest("%6.3f", 3.1415, " 3.142")
+	addFormatterTest("%7.4f", 3.1415, " 3.1415")
+
+	// %d passthrough with width.
+	addFormatterTest("%3d", 127, "127")
+	addFormatterTest("%4d", 127, " 127")
+	addFormatterTest("%5d", 127, "  127")
+
+	// %q passthrough with string.
+	addFormatterTest("%q", "test", "\"test\"")
+}
+
+// TestFormatter executes all of the tests described by formatterTests.
+func TestFormatter(t *testing.T) {
+	// Setup tests.
+ addIntFormatterTests() + addUintFormatterTests() + addBoolFormatterTests() + addFloatFormatterTests() + addComplexFormatterTests() + addArrayFormatterTests() + addSliceFormatterTests() + addStringFormatterTests() + addInterfaceFormatterTests() + addMapFormatterTests() + addStructFormatterTests() + addUintptrFormatterTests() + addUnsafePointerFormatterTests() + addChanFormatterTests() + addFuncFormatterTests() + addCircularFormatterTests() + addPanicFormatterTests() + addErrorFormatterTests() + addPassthroughFormatterTests() + + t.Logf("Running %d tests", len(formatterTests)) + for i, test := range formatterTests { + buf := new(bytes.Buffer) + spew.Fprintf(buf, test.format, test.in) + s := buf.String() + if testFailed(s, test.wants) { + t.Errorf("Formatter #%d format: %s got: %s %s", i, test.format, s, + stringizeWants(test.wants)) + continue + } + } +} + +type testStruct struct { + x int +} + +func (ts testStruct) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +type testStructP struct { + x int +} + +func (ts *testStructP) String() string { + return fmt.Sprintf("ts.%d", ts.x) +} + +func TestPrintSortedKeys(t *testing.T) { + cfg := spew.ConfigState{SortKeys: true} + s := cfg.Sprint(map[int]string{1: "1", 3: "3", 2: "2"}) + expected := "map[1:1 2:2 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 1:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[stringer]int{"1": 1, "3": 3, "2": 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 2:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2}) + expected = "map[stringer 1:1 stringer 2:2 stringer 3:3]" + if spew.UnsafeDisabled { + expected = "map[1:1 2:2 3:3]" + } + if s != expected { + t.Errorf("Sorted keys mismatch 3:\n %v %v", s, expected) + } + + s = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 4:\n %v %v", s, expected) + } + + if !spew.UnsafeDisabled { + s = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2}) + expected = "map[ts.1:1 ts.2:2 ts.3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 5:\n %v %v", s, expected) + } + } + + s = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2}) + expected = "map[error: 1:1 error: 2:2 error: 3:3]" + if s != expected { + t.Errorf("Sorted keys mismatch 6:\n %v %v", s, expected) + } +} diff --git a/vendor/github.com/davecgh/go-spew/spew/internal_test.go b/vendor/github.com/davecgh/go-spew/spew/internal_test.go new file mode 100644 index 0000000000..20a9cfefc6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/internal_test.go @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+)
+
+// dummyFmtState implements a fake fmt.State to use for testing invalid
+// reflect.Value handling. This is necessary because the fmt package catches
+// invalid values before invoking the formatter on them.
+type dummyFmtState struct {
+	bytes.Buffer
+}
+
+func (dfs *dummyFmtState) Flag(f int) bool {
+	if f == int('+') {
+		return true
+	}
+	return false
+}
+
+func (dfs *dummyFmtState) Precision() (int, bool) {
+	return 0, false
+}
+
+func (dfs *dummyFmtState) Width() (int, bool) {
+	return 0, false
+}
+
+// TestInvalidReflectValue ensures the dump and formatter code handles an
+// invalid reflect value properly. This needs access to internal state since it
+// should never happen in real code and therefore can't be tested via the public
+// API.
+func TestInvalidReflectValue(t *testing.T) {
+	i := 1
+
+	// Dump invalid reflect value.
+	v := new(reflect.Value)
+	buf := new(bytes.Buffer)
+	d := dumpState{w: buf, cs: &Config}
+	d.dump(*v)
+	s := buf.String()
+	want := "<invalid>"
+	if s != want {
+		t.Errorf("InvalidReflectValue #%d\n got: %s want: %s", i, s, want)
+	}
+	i++
+
+	// Formatter invalid reflect value.
+	buf2 := new(dummyFmtState)
+	f := formatState{value: *v, cs: &Config, fs: buf2}
+	f.format(*v)
+	s = buf2.String()
+	want = "<invalid>"
+	if s != want {
+		t.Errorf("InvalidReflectValue #%d got: %s want: %s", i, s, want)
+	}
+}
+
+// SortValues makes the internal sortValues function available to the test
+// package.
+func SortValues(values []reflect.Value, cs *ConfigState) {
+	sortValues(values, cs)
+}
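For context on the fake fmt.State above: the fmt package only hands a real fmt.State to a type's Format method, so the test fakes the same surface (Flag, Precision, Width plus the embedded Write) to drive the formatter directly. A minimal standalone sketch of what a custom fmt.Formatter receives through fmt.State; flagProbe is a made-up name and this snippet is an illustration, not part of the vendored patch:

    package main

    import "fmt"

    // flagProbe is a hypothetical type whose Format method reports the verb,
    // flags and width that the fmt package passes in via fmt.State.
    type flagProbe struct{}

    func (flagProbe) Format(f fmt.State, verb rune) {
        width, hasWidth := f.Width()
        fmt.Fprintf(f, "verb=%c plus=%v width=%d(set=%v)",
            verb, f.Flag('+'), width, hasWidth)
    }

    func main() {
        fmt.Printf("%+5v\n", flagProbe{}) // prints: verb=v plus=true width=5(set=true)
    }

Any type with those four methods can stand in for a real fmt.State, which is exactly what dummyFmtState does via its embedded bytes.Buffer.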
diff --git a/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
new file mode 100644
index 0000000000..a0c612ec3d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2013-2016 Dave Collins
+
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe

+/*
+This test file is part of the spew package rather than the spew_test
+package because it needs access to internals to properly test certain cases
+which are not possible via the public interface since they should never happen.
+*/
+
+package spew
+
+import (
+	"bytes"
+	"reflect"
+	"testing"
+	"unsafe"
+)
+
+// changeKind uses unsafe to intentionally change the kind of a reflect.Value to
+// the maximum kind value which does not exist. This is needed to test the
+// fallback code which punts to the standard fmt library for new types that
+// might get added to the language.
+func changeKind(v *reflect.Value, readOnly bool) {
+	rvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))
+	*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew_test
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+// spewFunc is used to identify which public function of the spew package or
+// ConfigState a test applies to.
+type spewFunc int
+
+const (
+	fCSFdump spewFunc = iota
+	fCSFprint
+	fCSFprintf
+	fCSFprintln
+	fCSPrint
+	fCSPrintln
+	fCSSdump
+	fCSSprint
+	fCSSprintf
+	fCSSprintln
+	fCSErrorf
+	fCSNewFormatter
+	fErrorf
+	fFprint
+	fFprintln
+	fPrint
+	fPrintln
+	fSdump
+	fSprint
+	fSprintf
+	fSprintln
+)
+
+// Map of spewFunc values to names for pretty printing.
+var spewFuncStrings = map[spewFunc]string{
+	fCSFdump:        "ConfigState.Fdump",
+	fCSFprint:       "ConfigState.Fprint",
+	fCSFprintf:      "ConfigState.Fprintf",
+	fCSFprintln:     "ConfigState.Fprintln",
+	fCSSdump:        "ConfigState.Sdump",
+	fCSPrint:        "ConfigState.Print",
+	fCSPrintln:      "ConfigState.Println",
+	fCSSprint:       "ConfigState.Sprint",
+	fCSSprintf:      "ConfigState.Sprintf",
+	fCSSprintln:     "ConfigState.Sprintln",
+	fCSErrorf:       "ConfigState.Errorf",
+	fCSNewFormatter: "ConfigState.NewFormatter",
+	fErrorf:         "spew.Errorf",
+	fFprint:         "spew.Fprint",
+	fFprintln:       "spew.Fprintln",
+	fPrint:          "spew.Print",
+	fPrintln:        "spew.Println",
+	fSdump:          "spew.Sdump",
+	fSprint:         "spew.Sprint",
+	fSprintf:        "spew.Sprintf",
+	fSprintln:       "spew.Sprintln",
+}
+
+func (f spewFunc) String() string {
+	if s, ok := spewFuncStrings[f]; ok {
+		return s
+	}
+	return fmt.Sprintf("Unknown spewFunc (%d)", int(f))
+}
+
+// spewTest is used to describe a test to be performed against the public
+// functions of the spew package or ConfigState.
+type spewTest struct {
+	cs     *spew.ConfigState
+	f      spewFunc
+	format string
+	in     interface{}
+	want   string
+}
+
+// spewTests houses the tests to be performed against the public functions of
+// the spew package and ConfigState.
+//
+// These tests are only intended to ensure the public functions are exercised
+// and are intentionally not exhaustive of types. The exhaustive type
+// tests are handled in the dump and format tests.
+var spewTests []spewTest
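Because spew's Print and Println variants write straight to os.Stdout, the tests capture their output by temporarily swapping the os.Stdout file handle; that is what the redirStdout helper defined just below does. A standalone sketch of the same capture pattern and of how such a helper is typically called (captureStdout is a hypothetical name; illustration only, not part of the patch):

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
    )

    // captureStdout swaps os.Stdout for a temp file around f, then reads the
    // file back, mirroring the redirStdout helper in the vendored test.
    func captureStdout(f func()) ([]byte, error) {
        tmp, err := ioutil.TempFile("", "capture")
        if err != nil {
            return nil, err
        }
        defer os.Remove(tmp.Name())

        orig := os.Stdout
        os.Stdout = tmp // fmt.Print* now lands in the temp file
        f()
        os.Stdout = orig
        tmp.Close()

        return ioutil.ReadFile(tmp.Name())
    }

    func main() {
        out, err := captureStdout(func() { fmt.Println("hello") })
        fmt.Printf("captured %q, err=%v\n", out, err)
    }

Reassigning os.Stdout works because it is an ordinary package-level *os.File variable; only writes that go through that variable are captured.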
+
+// redirStdout is a helper function to return the standard output from f as a
+// byte slice.
+func redirStdout(f func()) ([]byte, error) {
+	tempFile, err := ioutil.TempFile("", "ss-test")
+	if err != nil {
+		return nil, err
+	}
+	fileName := tempFile.Name()
+	defer os.Remove(fileName) // Ignore error
+
+	origStdout := os.Stdout
+	os.Stdout = tempFile
+	f()
+	os.Stdout = origStdout
+	tempFile.Close()
+
+	return ioutil.ReadFile(fileName)
+}
+
+func initSpewTests() {
+	// Config states with various settings.
+	scsDefault := spew.NewDefaultConfig()
+	scsNoMethods := &spew.ConfigState{Indent: " ", DisableMethods: true}
+	scsNoPmethods := &spew.ConfigState{Indent: " ", DisablePointerMethods: true}
+	scsMaxDepth := &spew.ConfigState{Indent: " ", MaxDepth: 1}
+	scsContinue := &spew.ConfigState{Indent: " ", ContinueOnMethod: true}
+	scsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}
+	scsNoCap := &spew.ConfigState{DisableCapacities: true}
+
+	// Variables for tests on types which implement Stringer interface with and
+	// without a pointer receiver.
+	ts := stringer("test")
+	tps := pstringer("test")
+
+	type ptrTester struct {
+		s *struct{}
+	}
+	tptr := &ptrTester{s: &struct{}{}}
+
+	// depthTester is used to test max depth handling for structs, arrays, slices
+	// and maps.
+	type depthTester struct {
+		ic    indirCir1
+		arr   [1]string
+		slice []string
+		m     map[string]int
+	}
+	dt := depthTester{indirCir1{nil}, [1]string{"arr"}, []string{"slice"},
+		map[string]int{"one": 1}}
+
+	// Variable for tests on types which implement error interface.
+	te := customError(10)
+
+	spewTests = []spewTest{
+		{scsDefault, fCSFdump, "", int8(127), "(int8) 127\n"},
+		{scsDefault, fCSFprint, "", int16(32767), "32767"},
+		{scsDefault, fCSFprintf, "%v", int32(2147483647), "2147483647"},
+		{scsDefault, fCSFprintln, "", int(2147483647), "2147483647\n"},
+		{scsDefault, fCSPrint, "", int64(9223372036854775807), "9223372036854775807"},
+		{scsDefault, fCSPrintln, "", uint8(255), "255\n"},
+		{scsDefault, fCSSdump, "", uint8(64), "(uint8) 64\n"},
+		{scsDefault, fCSSprint, "", complex(1, 2), "(1+2i)"},
+		{scsDefault, fCSSprintf, "%v", complex(float32(3), 4), "(3+4i)"},
+		{scsDefault, fCSSprintln, "", complex(float64(5), 6), "(5+6i)\n"},
+		{scsDefault, fCSErrorf, "%#v", uint16(65535), "(uint16)65535"},
+		{scsDefault, fCSNewFormatter, "%v", uint32(4294967295), "4294967295"},
+		{scsDefault, fErrorf, "%v", uint64(18446744073709551615), "18446744073709551615"},
+		{scsDefault, fFprint, "", float32(3.14), "3.14"},
+		{scsDefault, fFprintln, "", float64(6.28), "6.28\n"},
+		{scsDefault, fPrint, "", true, "true"},
+		{scsDefault, fPrintln, "", false, "false\n"},
+		{scsDefault, fSdump, "", complex(-10, -20), "(complex128) (-10-20i)\n"},
+		{scsDefault, fSprint, "", complex(-1, -2), "(-1-2i)"},
+		{scsDefault, fSprintf, "%v", complex(float32(-3), -4), "(-3-4i)"},
+		{scsDefault, fSprintln, "", complex(float64(-5), -6), "(-5-6i)\n"},
+		{scsNoMethods, fCSFprint, "", ts, "test"},
+		{scsNoMethods, fCSFprint, "", &ts, "<*>test"},
+		{scsNoMethods, fCSFprint, "", tps, "test"},
+		{scsNoMethods, fCSFprint, "", &tps, "<*>test"},
+		{scsNoPmethods, fCSFprint, "", ts, "stringer test"},
+		{scsNoPmethods, fCSFprint, "", &ts, "<*>stringer test"},
+		{scsNoPmethods, fCSFprint, "", tps, "test"},
+		{scsNoPmethods, fCSFprint, "", &tps, "<*>stringer test"},
+		{scsMaxDepth, fCSFprint, "", dt, "{{<max>} [<max>] [<max>] map[<max>]}"},
+		{scsMaxDepth, fCSFdump, "", dt,
"(spew_test.depthTester) {\n" + + " ic: (spew_test.indirCir1) {\n \n },\n" + + " arr: ([1]string) (len=1 cap=1) {\n \n },\n" + + " slice: ([]string) (len=1 cap=1) {\n \n },\n" + + " m: (map[string]int) (len=1) {\n \n }\n}\n"}, + {scsContinue, fCSFprint, "", ts, "(stringer test) test"}, + {scsContinue, fCSFdump, "", ts, "(spew_test.stringer) " + + "(len=4) (stringer test) \"test\"\n"}, + {scsContinue, fCSFprint, "", te, "(error: 10) 10"}, + {scsContinue, fCSFdump, "", te, "(spew_test.customError) " + + "(error: 10) 10\n"}, + {scsNoPtrAddr, fCSFprint, "", tptr, "<*>{<*>{}}"}, + {scsNoPtrAddr, fCSSdump, "", tptr, "(*spew_test.ptrTester)({\ns: (*struct {})({\n})\n})\n"}, + {scsNoCap, fCSSdump, "", make([]string, 0, 10), "([]string) {\n}\n"}, + {scsNoCap, fCSSdump, "", make([]string, 1, 10), "([]string) (len=1) {\n(string) \"\"\n}\n"}, + } +} + +// TestSpew executes all of the tests described by spewTests. +func TestSpew(t *testing.T) { + initSpewTests() + + t.Logf("Running %d tests", len(spewTests)) + for i, test := range spewTests { + buf := new(bytes.Buffer) + switch test.f { + case fCSFdump: + test.cs.Fdump(buf, test.in) + + case fCSFprint: + test.cs.Fprint(buf, test.in) + + case fCSFprintf: + test.cs.Fprintf(buf, test.format, test.in) + + case fCSFprintln: + test.cs.Fprintln(buf, test.in) + + case fCSPrint: + b, err := redirStdout(func() { test.cs.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSPrintln: + b, err := redirStdout(func() { test.cs.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fCSSdump: + str := test.cs.Sdump(test.in) + buf.WriteString(str) + + case fCSSprint: + str := test.cs.Sprint(test.in) + buf.WriteString(str) + + case fCSSprintf: + str := test.cs.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fCSSprintln: + str := test.cs.Sprintln(test.in) + buf.WriteString(str) + + case fCSErrorf: + err := test.cs.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fCSNewFormatter: + fmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in)) + + case fErrorf: + err := spew.Errorf(test.format, test.in) + buf.WriteString(err.Error()) + + case fFprint: + spew.Fprint(buf, test.in) + + case fFprintln: + spew.Fprintln(buf, test.in) + + case fPrint: + b, err := redirStdout(func() { spew.Print(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fPrintln: + b, err := redirStdout(func() { spew.Println(test.in) }) + if err != nil { + t.Errorf("%v #%d %v", test.f, i, err) + continue + } + buf.Write(b) + + case fSdump: + str := spew.Sdump(test.in) + buf.WriteString(str) + + case fSprint: + str := spew.Sprint(test.in) + buf.WriteString(str) + + case fSprintf: + str := spew.Sprintf(test.format, test.in) + buf.WriteString(str) + + case fSprintln: + str := spew.Sprintln(test.in) + buf.WriteString(str) + + default: + t.Errorf("%v #%d unrecognized function", test.f, i) + continue + } + s := buf.String() + if test.want != s { + t.Errorf("ConfigState #%d\n got: %s want: %s", i, s, test.want) + continue + } + } +} diff --git a/vendor/github.com/davecgh/go-spew/test_coverage.txt b/vendor/github.com/davecgh/go-spew/test_coverage.txt new file mode 100644 index 0000000000..2cd087a2a1 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/test_coverage.txt @@ -0,0 +1,61 @@ + +github.com/davecgh/go-spew/spew/dump.go dumpState.dump 100.00% (88/88) +github.com/davecgh/go-spew/spew/format.go 
formatState.format 100.00% (82/82) +github.com/davecgh/go-spew/spew/format.go formatState.formatPtr 100.00% (52/52) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpPtr 100.00% (44/44) +github.com/davecgh/go-spew/spew/dump.go dumpState.dumpSlice 100.00% (39/39) +github.com/davecgh/go-spew/spew/common.go handleMethods 100.00% (30/30) +github.com/davecgh/go-spew/spew/common.go printHexPtr 100.00% (18/18) +github.com/davecgh/go-spew/spew/common.go unsafeReflectValue 100.00% (13/13) +github.com/davecgh/go-spew/spew/format.go formatState.constructOrigFormat 100.00% (12/12) +github.com/davecgh/go-spew/spew/dump.go fdump 100.00% (11/11) +github.com/davecgh/go-spew/spew/format.go formatState.Format 100.00% (11/11) +github.com/davecgh/go-spew/spew/common.go init 100.00% (10/10) +github.com/davecgh/go-spew/spew/common.go printComplex 100.00% (9/9) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Less 100.00% (8/8) +github.com/davecgh/go-spew/spew/format.go formatState.buildDefaultFormat 100.00% (7/7) +github.com/davecgh/go-spew/spew/format.go formatState.unpackValue 100.00% (5/5) +github.com/davecgh/go-spew/spew/dump.go dumpState.indent 100.00% (4/4) +github.com/davecgh/go-spew/spew/common.go catchPanic 100.00% (4/4) +github.com/davecgh/go-spew/spew/config.go ConfigState.convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/spew.go convertArgs 100.00% (4/4) +github.com/davecgh/go-spew/spew/format.go newFormatter 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go printBool 100.00% (3/3) +github.com/davecgh/go-spew/spew/common.go sortValues 100.00% (3/3) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sdump 100.00% (3/3) +github.com/davecgh/go-spew/spew/dump.go dumpState.unpackValue 100.00% (3/3) +github.com/davecgh/go-spew/spew/spew.go Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printFloat 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go NewDefaultConfig 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printInt 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go printUint 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Len 100.00% (1/1) +github.com/davecgh/go-spew/spew/common.go valuesSorter.Swap 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Print 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Printf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Println 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Sprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/config.go ConfigState.Dump 100.00% (1/1) 
+github.com/davecgh/go-spew/spew/dump.go Fdump 100.00% (1/1) +github.com/davecgh/go-spew/spew/dump.go Dump 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintln 100.00% (1/1) +github.com/davecgh/go-spew/spew/format.go NewFormatter 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Errorf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprint 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Fprintf 100.00% (1/1) +github.com/davecgh/go-spew/spew/spew.go Print 100.00% (1/1) +github.com/davecgh/go-spew/spew ------------------------------- 100.00% (505/505) + diff --git a/vendor/github.com/dgryski/go-bits/bits_test.go b/vendor/github.com/dgryski/go-bits/bits_test.go new file mode 100644 index 0000000000..f49de8b776 --- /dev/null +++ b/vendor/github.com/dgryski/go-bits/bits_test.go @@ -0,0 +1,50 @@ +package bits + +import ( + "testing" + "testing/quick" +) + +func testQuick(t *testing.T, which string, ffast, fslow func(x uint64) uint64) { + f := func(x uint64) bool { + return ffast(x) == fslow(x) + } + if err := quick.Check(f, nil); err != nil { + t.Errorf("fast%v != slow%v: %v: ", which, which, err) + } +} + +func ctzSlow(x uint64) uint64 { + var n uint64 + for x&1 == 0 { + n++ + x >>= 1 + } + return n +} + +func TestQuickCtz(t *testing.T) { testQuick(t, "ctz", Ctz, ctzSlow) } + +func clzSlow(x uint64) uint64 { + var n uint64 + for x&0x8000000000000000 == 0 { + n++ + x <<= 1 + } + return n +} + +func TestQuickClz(t *testing.T) { testQuick(t, "clz", Clz, clzSlow) } + +func popcntSlow(x uint64) uint64 { + var n uint64 + for x != 0 { + if x&1 == 1 { + n++ + } + x >>= 1 + } + return n +} + +func TestQuickPopcnt(t *testing.T) { testQuick(t, "popcnt", Popcnt, popcntSlow) } diff --git a/vendor/github.com/dgryski/go-linlog/linlog_test.go b/vendor/github.com/dgryski/go-linlog/linlog_test.go new file mode 100644 index 0000000000..6f47ecee2f --- /dev/null +++ b/vendor/github.com/dgryski/go-linlog/linlog_test.go @@ -0,0 +1,146 @@ +package linlog + +import ( + "reflect" + "testing" +) + +func TestBinOf(t *testing.T) { + + tests := []struct { + sz uint64 + l uint64 + b uint64 + wb uint64 + wsz uint64 + }{ + { + 0, 4, 2, + 0, 0, + }, + { + 1, 4, 2, + 1, 4, + }, + { + 4, 4, 2, + 1, 4, + }, + { + 5, 4, 2, + 2, 8, + }, + { + 9, 4, 2, + 3, 12, + }, + { + 15, 4, 2, + 4, 16, + }, + { + 17, 4, 2, + 5, 20, + }, + { + 34, 4, 2, + 9, 40, + }, + } + + for _, tt := range tests { + if r, b := BinOf(tt.sz, tt.l, tt.b); r != tt.wsz || b != tt.wb { + t.Errorf("BinOf(%d,%d,%d)=(%d,%d), want (%d,%d)", tt.sz, tt.l, tt.b, r, b, tt.wsz, tt.wb) + } + } +} + +func TestBinDownOf(t *testing.T) { + + tests := []struct { + sz uint64 + l uint64 + b uint64 + wb uint64 + wsz uint64 + }{ + { + 0, 4, 2, + 0, 0, + }, + { + 1, 4, 2, + 0, 0, + }, + { + 3, 4, 2, + 0, 0, + }, + { + 4, 4, 2, + 1, 4, + }, + { + 7, 4, 2, + 1, 4, + }, + { + 15, 4, 2, + 3, 12, + }, + { + 16, 4, 2, + 4, 16, + }, + { + 17, 4, 2, + 4, 16, + }, + { + 34, 4, 2, + 8, 32, + }, + } + + for _, tt := range tests { + if r, b := BinDownOf(tt.sz, tt.l, tt.b); r != tt.wsz || b != tt.wb { + t.Errorf("BinDownOf(%d,%d,%d)=(%d,%d), want (%d,%d)", tt.sz, tt.l, tt.b, r, b, tt.wsz, tt.wb) + } + } +} + +func TestBins(t *testing.T) { + var tests = []struct { + m, l, s uint64 + }{ + {1024, 4, 2}, + {1024, 4, 4}, + {1024, 5, 2}, + {1024, 5, 3}, + {1024, 5, 4}, + {1024, 6, 2}, + {1024, 6, 3}, + {1024, 6, 4}, + {1024, 6, 5}, + } + + for _, tt := range tests { + var bins []uint64 + var prev uint64 = ^uint64(0) + for i := uint64(0); i < tt.m; i++ { + r, _ := 
BinOf(uint64(i), tt.l, tt.s)
+			if r != prev {
+				bins = append(bins, r)
+				prev = r
+			}
+		}
+
+		b := Bins(tt.m, tt.l, tt.s)
+
+		t.Logf("Bins(%v,%v,%v)=%v", tt.m, tt.l, tt.s, b)
+
+		if !reflect.DeepEqual(b, bins) {
+			t.Errorf("Bins(%v,%v,%v)=%v, want %v\n", tt.m, tt.l, tt.s, b, bins)
+		}
+	}
+}
diff --git a/vendor/github.com/dgryski/go-tsz/.gitignore b/vendor/github.com/dgryski/go-tsz/.gitignore
new file mode 100644
index 0000000000..b10906d2eb
--- /dev/null
+++ b/vendor/github.com/dgryski/go-tsz/.gitignore
@@ -0,0 +1 @@
+eval/eval
diff --git a/vendor/github.com/dgryski/go-tsz/tsz_test.go b/vendor/github.com/dgryski/go-tsz/tsz_test.go
new file mode 100644
index 0000000000..25946115d0
--- /dev/null
+++ b/vendor/github.com/dgryski/go-tsz/tsz_test.go
@@ -0,0 +1,275 @@
+package tsz
+
+import (
+	"github.com/dgryski/go-tsz/testdata"
+	"testing"
+	"time"
+)
+
+func TestExampleEncoding(t *testing.T) {
+
+	// Example from the paper
+	t0, _ := time.ParseInLocation("Jan _2 2006 15:04:05", "Mar 24 2015 02:00:00", time.Local)
+	tunix := uint32(t0.Unix())
+
+	s := New(tunix)
+
+	tunix += 62
+	s.Push(tunix, 12)
+
+	tunix += 60
+	s.Push(tunix, 12)
+
+	tunix += 60
+	s.Push(tunix, 24)
+
+	// extra tests
+
+	// floating point masking/shifting bug
+	tunix += 60
+	s.Push(tunix, 13)
+
+	tunix += 60
+	s.Push(tunix, 24)
+
+	// delta-of-delta sizes
+	tunix += 300 // == delta-of-delta of 240
+	s.Push(tunix, 24)
+
+	tunix += 900 // == delta-of-delta of 600
+	s.Push(tunix, 24)
+
+	tunix += 900 + 2050 // == delta-of-delta of 2050
+	s.Push(tunix, 24)
+
+	it := s.Iter()
+
+	tunix = uint32(t0.Unix())
+	want := []struct {
+		t uint32
+		v float64
+	}{
+		{tunix + 62, 12},
+		{tunix + 122, 12},
+		{tunix + 182, 24},
+
+		{tunix + 242, 13},
+		{tunix + 302, 24},
+
+		{tunix + 602, 24},
+		{tunix + 1502, 24},
+		{tunix + 4452, 24},
+	}
+
+	for _, w := range want {
+		if !it.Next() {
+			t.Fatalf("Next()=false, want true")
+		}
+		tt, vv := it.Values()
+		if w.t != tt || w.v != vv {
+			t.Errorf("Values()=(%v,%v), want (%v,%v)\n", tt, vv, w.t, w.v)
+		}
+	}
+
+	if it.Next() {
+		t.Fatalf("Next()=true, want false")
+	}
+
+	if err := it.Err(); err != nil {
+		t.Errorf("it.Err()=%v, want nil", err)
+	}
+}
+
+func TestRoundtrip(t *testing.T) {
+
+	s := New(testdata.TwoHoursData[0].T)
+	for _, p := range testdata.TwoHoursData {
+		s.Push(p.T, p.V)
+	}
+
+	it := s.Iter()
+	for _, w := range testdata.TwoHoursData {
+		if !it.Next() {
+			t.Fatalf("Next()=false, want true")
+		}
+		tt, vv := it.Values()
+		// t.Logf("it.Values()=(%+v, %+v)\n", time.Unix(int64(tt), 0), vv)
+		if w.T != tt || w.V != vv {
+			t.Errorf("Values()=(%v,%v), want (%v,%v)\n", tt, vv, w.T, w.V)
+		}
+	}
+
+	if it.Next() {
+		t.Fatalf("Next()=true, want false")
+	}
+
+	if err := it.Err(); err != nil {
+		t.Errorf("it.Err()=%v, want nil", err)
+	}
+}
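The delta-of-delta comments in TestExampleEncoding above are the quantities the encoder actually has to fit into its variable-width windows: each Push stores the difference between the current timestamp delta and the previous delta. A quick standalone check of the arithmetic behind those comments (illustration only, not part of the patch):

    package main

    import "fmt"

    func main() {
        // Successive Push gaps from TestExampleEncoding above.
        deltas := []int{62, 60, 60, 60, 60, 300, 900, 2950}
        prev := 0
        for _, d := range deltas {
            fmt.Printf("delta=%4d delta-of-delta=%5d\n", d, d-prev)
            prev = d
        }
    }

The last three lines print 240, 600 and 2050, matching the comments: the final gap of 900+2050=2950 seconds against a previous delta of 900 is a delta-of-delta of 2050.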
+
+func TestConcurrentRoundtripImmediateWrites(t *testing.T) {
+	testConcurrentRoundtrip(t, time.Duration(0))
+}
+func TestConcurrentRoundtrip1MsBetweenWrites(t *testing.T) {
+	testConcurrentRoundtrip(t, time.Millisecond)
+}
+func TestConcurrentRoundtrip10MsBetweenWrites(t *testing.T) {
+	testConcurrentRoundtrip(t, 10*time.Millisecond)
+}
+
+// Test reading while writing at the same time.
+func testConcurrentRoundtrip(t *testing.T, sleep time.Duration) {
+	s := New(testdata.TwoHoursData[0].T)
+
+	// notify the reader about the number of points that have been written.
+	writeNotify := make(chan int)
+
+	// notify the reader when we have finished.
+	done := make(chan struct{})
+
+	// continuously iterate over the values of the series.
+	// when a write is made, the total number of points in the series
+	// will be sent over the channel, so we can make sure we are reading
+	// the correct number of values.
+	go func(numPoints chan int, finished chan struct{}) {
+		written := 0
+		for {
+			select {
+			case written = <-numPoints:
+			default:
+				read := 0
+				it := s.Iter()
+				// read all of the points in the series.
+				for it.Next() {
+					tt, vv := it.Values()
+					expectedT := testdata.TwoHoursData[read].T
+					expectedV := testdata.TwoHoursData[read].V
+					if expectedT != tt || expectedV != vv {
+						t.Errorf("metric values don't match what was written. (%d, %f) != (%d, %f)\n", tt, vv, expectedT, expectedV)
+					}
+					read++
+				}
+				// check that the number of points read matches the number of points
+				// written to the series.
+				if read != written && read != written+1 {
+					// check if a point was written while we were running
+					select {
+					case written = <-numPoints:
+						// a new point was written.
+						if read != written && read != written+1 {
+							t.Errorf("expected %d values in series, got %d", written, read)
+						}
+					default:
+						t.Errorf("expected %d values in series, got %d", written, read)
+					}
+				}
+			}
+			// check if we have finished writing points.
+			select {
+			case <-finished:
+				return
+			default:
+			}
+		}
+	}(writeNotify, done)
+
+	// write points to the series.
+	for i := 0; i < 100; i++ {
+		s.Push(testdata.TwoHoursData[i].T, testdata.TwoHoursData[i].V)
+		writeNotify <- i + 1
+		time.Sleep(sleep)
+	}
+	done <- struct{}{}
+}
+
+func BenchmarkEncode(b *testing.B) {
+	b.SetBytes(int64(len(testdata.TwoHoursData) * 12))
+	for i := 0; i < b.N; i++ {
+		s := New(testdata.TwoHoursData[0].T)
+		for _, tt := range testdata.TwoHoursData {
+			s.Push(tt.T, tt.V)
+		}
+	}
+}
+
+func BenchmarkDecodeSeries(b *testing.B) {
+	b.SetBytes(int64(len(testdata.TwoHoursData) * 12))
+	s := New(testdata.TwoHoursData[0].T)
+	for _, tt := range testdata.TwoHoursData {
+		s.Push(tt.T, tt.V)
+	}
+
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		it := s.Iter()
+		var j int
+		for it.Next() {
+			j++
+		}
+	}
+}
+
+func BenchmarkDecodeByteSlice(b *testing.B) {
+	b.SetBytes(int64(len(testdata.TwoHoursData) * 12))
+	s := New(testdata.TwoHoursData[0].T)
+	for _, tt := range testdata.TwoHoursData {
+		s.Push(tt.T, tt.V)
+	}
+
+	s.Finish()
+	bytes := s.Bytes()
+	buf := make([]byte, len(bytes))
+	b.ResetTimer()
+
+	for i := 0; i < b.N; i++ {
+		copy(buf, bytes)
+		it, _ := NewIterator(buf)
+		var j int
+		for it.Next() {
+			j++
+		}
+	}
+}
+
+func TestEncodeSimilarFloats(t *testing.T) {
+	tunix := uint32(time.Unix(0, 0).Unix())
+	s := New(tunix)
+	want := []struct {
+		t uint32
+		v float64
+	}{
+		{tunix, 6.00065e+06},
+		{tunix + 1, 6.000656e+06},
+		{tunix + 2, 6.000657e+06},
+		{tunix + 3, 6.000659e+06},
+		{tunix + 4, 6.000661e+06},
+	}
+
+	for _, v := range want {
+		s.Push(v.t, v.v)
+	}
+
+	s.Finish()
+
+	it := s.Iter()
+
+	for _, w := range want {
+		if !it.Next() {
+			t.Fatalf("Next()=false, want true")
+		}
+		tt, vv := it.Values()
+		if w.t != tt || w.v != vv {
+			t.Errorf("Values()=(%v,%v), want (%v,%v)\n", tt, vv, w.t, w.v)
+		}
+	}
+
+	if it.Next() {
+		t.Fatalf("Next()=true, want false")
+	}
+
+	if err := it.Err(); err != nil {
+		t.Errorf("it.Err()=%v, want nil", err)
+	}
+}
diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore
new file mode 100644
index 0000000000..1c3ae0a773
--- /dev/null
+++ b/vendor/github.com/docker/distribution/.gitignore
@@ -0,0 +1,37 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# 
Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 0000000000..2d68669f31 --- /dev/null +++ b/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,19 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita +Misty Stanley-Jones Misty Stanley-Jones diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS new file mode 100644 index 0000000000..aaf0298714 --- /dev/null +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -0,0 +1,182 @@ +Aaron Lehmann +Aaron Schlesinger +Aaron Vinson +Adam Duke +Adam Enger +Adrian Mouat +Ahmet Alp Balkan +Alex Chan +Alex Elman +Alexey Gladkov +allencloud +amitshukla +Amy Lindburg +Andrew Hsu +Andrew Meredith +Andrew T Nguyen +Andrey Kostov +Andy Goldstein +Anis Elleuch +Antonio Mercado +Antonio Murdaca +Anton Tiurin +Anusha Ragunathan +a-palchikov +Arien Holthuizen +Arnaud Porterie +Arthur Baars +Asuka Suzuki +Avi Miller +Ayose Cazorla +BadZen +Ben Bodenmiller +Ben Firshman +bin liu +Brian Bland +burnettk +Carson A +Cezar Sa Espinola +Charles Smith +Chris Dillon +cuiwei13 +cyli +Daisuke Fujita +Daniel Huhn +Darren Shepherd +Dave Trombley +Dave Tucker +David Lawrence +davidli +David Verhasselt +David Xia +Dejan Golja +Derek McGowan +Diogo Mónica +DJ Enriquez +Donald Huang +Doug Davis +Edgar Lee +Eric Yang +Fabio Berchtold +Fabio Huser +farmerworking +Felix Yan +Florentin Raud +Frank Chen +Frederick F. 
Kautz IV +gabriell nascimento +Gleb Schukin +harche +Henri Gomez +Hua Wang +Hu Keping +HuKeping +Ian Babrou +igayoso +Jack Griffin +James Findley +Jason Freidman +Jason Heiss +Jeff Nickoloff +Jess Frazelle +Jessie Frazelle +jhaohai +Jianqing Wang +Jihoon Chung +Joao Fernandes +John Mulhausen +John Starks +Jonathan Boulle +Jon Johnson +Jon Poler +Jordan Liggitt +Josh Chorlton +Josh Hawn +Julien Fernandez +Keerthan Mala +Kelsey Hightower +Kenneth Lim +Kenny Leung +Ke Xu +liuchang0812 +Liu Hua +Li Yi +Lloyd Ramey +Louis Kottmann +Luke Carpenter +Marcus Martins +Mary Anthony +Matt Bentley +Matt Duch +Matthew Green +Matt Moore +Matt Robenolt +Michael Prokop +Michal Minar +Michal Minář +Mike Brown +Miquel Sabaté +Misty Stanley-Jones +Morgan Bauer +moxiegirl +Nathan Sullivan +nevermosby +Nghia Tran +Nikita Tarasov +Noah Treuhaft +Nuutti Kotivuori +Oilbeater +Olivier Gambier +Olivier Jacques +Omer Cohen +Patrick Devine +Phil Estes +Philip Misiowiec +Pierre-Yves Ritschard +Qiao Anran +Randy Barlow +Richard Scothern +Rodolfo Carvalho +Rusty Conover +Sean Boran +Sebastiaan van Stijn +Sebastien Coavoux +Serge Dubrouski +Sharif Nassar +Shawn Falkner-Horine +Shreyas Karnik +Simon Thulbourn +spacexnice +Spencer Rinehart +Stan Hu +Stefan Majewsky +Stefan Weil +Stephen J Day +Sungho Moon +Sven Dowideit +Sylvain Baubeau +Ted Reed +tgic +Thomas Sjögren +Tianon Gravi +Tibor Vass +Tonis Tiigi +Tony Holdstock-Brown +Trevor Pounds +Troels Thomsen +Victoria Bialas +Victor Vieux +Vincent Batts +Vincent Demeester +Vincent Giersch +weiyuan.yl +W. Trevor King +xg.song +xiekeyang +Yann ROBERT +yaoyao.xyy +yixi zhang +yuexiao-wang +yuzou +zhouhaibing089 +姜继忠 diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 0000000000..2d5a101190 --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,119 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. 
+ +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. + +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/tools/godep github.com/golang/lint/golint + +**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/Sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendored +Godeps directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry -version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. 
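As a hedged sketch of what such a tag gates (not a file from this repository): the project's Dockerfile sets `DOCKER_BUILDTAGS` to `include_oss include_gcs`, and a file guarded by a matching build constraint only takes part in builds that pass that tag:

    // +build include_oss

    // Hypothetical example: this file is compiled only when the build runs
    // with `go build -tags include_oss`, which is what DOCKER_BUILDTAGS
    // ultimately feeds into; without the tag it is excluded entirely.
    package main

    import "fmt"

    func main() {
        fmt.Println("optional OSS support compiled in")
    }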
diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md new file mode 100644 index 0000000000..b1a5c6824d --- /dev/null +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -0,0 +1,114 @@ +# Changelog + +## 2.6.1 (2017-04-05) + +#### Registry +- Fix `Forwarded` header handling, revert use of `X-Forwarded-Port` +- Use driver `Stat` for registry health check + +## 2.6.0 (2017-01-18) + +#### Storage +- S3: fixed bug in delete due to read-after-write inconsistency +- S3: allow EC2 IAM roles to be used when authorizing region endpoints +- S3: add Object ACL Support +- S3: fix delete method's notion of subpaths +- S3: use multipart upload API in `Move` method for performance +- S3: add v2 signature signing for legacy S3 clones +- Swift: add simple heuristic to detect incomplete DLOs during read ops +- Swift: support different user and tenant domains +- Swift: bulk deletes in chunks +- Aliyun OSS: fix delete method's notion of subpaths +- Aliyun OSS: optimize data copy after upload finishes +- Azure: close leaking response body +- Fix storage drivers dropping non-EOF errors when listing repositories +- Compare path properly when listing repositories in catalog +- Add a foreign layer URL host whitelist +- Improve catalog enumerate runtime + +#### Registry +- Export `storage.CreateOptions` in top-level package +- Enable notifications to endpoints that use self-signed certificates +- Properly validate multi-URL foreign layers +- Add control over validation of URLs in pushed manifests +- Proxy mode: fix socket leak when pull is cancelled +- Tag service: properly handle error responses on HEAD request +- Support for custom authentication URL in proxying registry +- Add configuration option to disable access logging +- Add notification filtering by target media type +- Manifest: `References()` returns all children +- Honor `X-Forwarded-Port` and Forwarded headers +- Reference: Preserve tag and digest in With* functions +- Add policy configuration for enforcing repository classes + +#### Client +- Changes the client Tags `All()` method to follow links +- Allow registry clients to connect via HTTP2 +- Better handling of OAuth errors in client + +#### Spec +- Manifest: clarify relationship between urls and foreign layers +- Authorization: add support for repository classes + +#### Manifest +- Override media type returned from `Stat()` for existing manifests +- Add plugin mediatype to distribution manifest + +#### Docs +- Document `TOOMANYREQUESTS` error code +- Document required Let's Encrypt port +- Improve documentation around implementation of OAuth2 +- Improve documentation for configuration + +#### Auth +- Add support for registry type in scope +- Add support for using v2 ping challenges for v1 +- Add leeway to JWT `nbf` and `exp` checking +- htpasswd: dynamically parse htpasswd file +- Fix missing auth headers with PATCH HTTP request when pushing to default port + +#### Dockerfile +- Update to go1.7 +- Reorder Dockerfile steps for better layer caching + +#### Notes + +Documentation has moved to the documentation repository at +`github.com/docker/docker.github.io/tree/master/registry` + +The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. 
+ + +## 2.5.0 (2016-06-14) + +#### Storage +- Ensure uploads directory is cleaned after upload is committed +- Add ability to cap concurrent operations in filesystem driver +- S3: Add 'us-gov-west-1' to the valid region list +- Swift: Handle ceph not returning Last-Modified header for HEAD requests +- Add redirect middleware + +#### Registry +- Add support for blobAccessController middleware +- Add support for layers from foreign sources +- Remove signature store +- Add support for Let's Encrypt +- Correct yaml key names in configuration + +#### Client +- Add option to get content digest from manifest get + +#### Spec +- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported +- Clarify API documentation around catalog fetch behavior + +#### API +- Support returning HTTP 429 (Too Many Requests) + +#### Documentation +- Update auth documentation examples to show "expires in" as int + +#### Docker Image +- Use Alpine Linux as base image + + diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 0000000000..7cc7aedffe --- /dev/null +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,140 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry -version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. 
provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. + +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. 
Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 0000000000..426954a112 --- /dev/null +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,18 @@ +FROM golang:1.7-alpine + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV DOCKER_BUILDTAGS include_oss include_gcs + +RUN set -ex \ + && apk add --no-cache make git + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml + +RUN make PREFIX=/go clean binaries + +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 0000000000..bda400150c --- /dev/null +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,58 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. 
+# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "dmcgowan", + "dmp42", + "richardscothern", + "shykes", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.richardscothern] + Name = "Richard Scothern" + Email = "richard.scothern@gmail.com" + GitHub = "richardscothern" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 0000000000..47b8f1d0b2 --- /dev/null +++ b/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,109 @@ +# Set an output prefix, which is the local directory if not specified +PREFIX?=$(shell pwd) + + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" + +.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet +.DEFAULT: all +all: fmt vet lint build test binaries + +AUTHORS: .mailmap .git/HEAD + git log --format='%aN <%aE>' | sort -fu > $@ + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + ./version/version.sh > $@ + +# Required for go 1.5 to build +GO15VENDOREXPERIMENT := 1 + +# Go files +GOFILES=$(shell find . -type f -name '*.go') + +# Package list +PKGS=$(shell go list -tags "${DOCKER_BUILDTAGS}" ./... | grep -v ^github.com/docker/distribution/vendor/) + +# Resolving binary dependencies for specific targets +GOLINT=$(shell which golint || echo '') +GODEP=$(shell which godep || echo '') + +${PREFIX}/bin/registry: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry + +${PREFIX}/bin/digest: $(GOFILES) + @echo "+ $@" + @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest + +${PREFIX}/bin/registry-api-descriptor-template: $(GOFILES) + @echo "+ $@" + @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template + +docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template + ./bin/registry-api-descriptor-template $< > $@ + +vet: + @echo "+ $@" + @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) + +fmt: + @echo "+ $@" + @test -z "$$(gofmt -s -l . 
2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \
+		(echo >&2 "+ please format Go code with 'gofmt -s'" && false)
+
+lint:
+	@echo "+ $@"
+	$(if $(GOLINT), , \
+		$(error Please install golint: `go get -u github.com/golang/lint/golint`))
+	@test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)"
+
+build:
+	@echo "+ $@"
+	@go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
+
+test:
+	@echo "+ $@"
+	@go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS)
+
+test-full:
+	@echo "+ $@"
+	@go test -tags "${DOCKER_BUILDTAGS}" $(PKGS)
+
+binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template
+	@echo "+ $@"
+
+clean:
+	@echo "+ $@"
+	@rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template"
+
+dep-save:
+	@echo "+ $@"
+	$(if $(GODEP), , \
+		$(error Please install godep: go get github.com/tools/godep))
+	@$(GODEP) save $(PKGS)
+
+dep-restore:
+	@echo "+ $@"
+	$(if $(GODEP), , \
+		$(error Please install godep: go get github.com/tools/godep))
+	@$(GODEP) restore -v
+
+dep-validate: dep-restore
+	@echo "+ $@"
+	@rm -Rf .vendor.bak
+	@mv vendor .vendor.bak
+	@rm -Rf Godeps
+	@$(GODEP) save ./...
+	@test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \
+		(echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match what you have in vendor" && false)
+	@rm -Rf .vendor.bak
diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md
new file mode 100644
index 0000000000..a6e8db0fb7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/README.md
@@ -0,0 +1,131 @@
+# Distribution
+
+The Docker toolset to pack, ship, store, and deliver content.
+
+This repository's main product is the Docker Registry 2.0 implementation
+for storing and distributing Docker images. It supersedes the
+[docker/docker-registry](https://github.com/docker/docker-registry)
+project with a new API design, focused around security and performance.
+
+
+
+[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
+[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
+
+This repository contains the following components:
+
+|**Component**       |Description                                                                                                                                                                                          |
+|--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| **registry**       | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+.                                                                                                  |
+| **libraries**      | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. |
+| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec)                                                                                                                       |
+| **documentation**  | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry.                              |
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of the V2 API for use in the
+[Docker core project](https://github.com/docker/docker).
+The API should be embeddable
+and simplify the process of securely pulling and pushing content from `docker`
+daemons.
+
+### What are the long term goals of the Distribution project?
+
+The _Distribution_ project has the further long term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own home-made solutions through good specs and solid
+  extension mechanisms.
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information see
+[docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+- IRC: #docker-distribution on FreeNode
+- Issue Tracker: github.com/docker/distribution/issues
+- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
+- Mailing List: docker@dockerproject.org
+
+## License
+
+This project is distributed under [Apache License, Version 2.0](LICENSE).
diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
new file mode 100644
index 0000000000..49235cecda
--- /dev/null
+++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md
@@ -0,0 +1,36 @@
+## Registry Release Checklist
+
+10. Compile release notes detailing features and bug fixes since the last release. Update the `CHANGELOG.md` file.
+
+20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go`
+
+30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files.
+
+```
+make AUTHORS
+```
+
+40. Create a signed tag.
+
+   Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]`.
+You will need PGP installed and a PGP key which has been added to your GitHub account. The comment for the tag should include the release notes.
+
+50. Push the signed tag.
+
+60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox.
+
+70. Update the registry binary in the [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request.
+
+80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to the new patch release if necessary.
+e.g. to release `2.3.1`
+
+   `2.3.1 (new)`
+
+   `2.3.0 -> 2.3.0` can be removed
+
+   `2 -> 2.3.1`
+
+   `2.3 -> 2.3.1`
+
+90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images.
+
diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md
new file mode 100644
index 0000000000..701127afec
--- /dev/null
+++ b/vendor/github.com/docker/distribution/ROADMAP.md
@@ -0,0 +1,267 @@
+# Roadmap
+
+The Distribution Project consists of several components, some of which are
+still being defined. This document defines the high-level goals of the
+project, identifies the current components, and defines the release
+relationship to the Docker Platform.
+
+* [Distribution Goals](#distribution-goals)
+* [Distribution Components](#distribution-components)
+* [Project Planning](#project-planning): the release relationship to the Docker Platform.
+
+This road map is a living document, providing an overview of the goals and
+considerations made in respect of the future of the project.
+
+## Distribution Goals
+
+- Replace the existing [docker registry](https://github.com/docker/docker-registry)
+  implementation as the primary implementation.
+- Replace the existing push and pull code in the docker engine with the
+  distribution package.
+- Define a strong data model for distributing docker images.
+- Provide a flexible distribution toolkit for use in the docker platform.
+- Unlock new distribution models.
+
+## Distribution Components
+
+Components of the Distribution Project are managed via GitHub [milestones](https://github.com/docker/distribution/milestones). Upcoming
+features and bugfixes for a component will be added to the relevant milestone.
+If a feature or bugfix is not part of a milestone, it is currently
+unscheduled for implementation.
+
+* [Registry](#registry)
+* [Distribution Package](#distribution-package)
+
+***
+
+### Registry
+
+The new Docker registry is the main portion of the distribution repository.
+Registry 2.0 is the first release of the next-generation registry. This was
+primarily focused on implementing the [new registry
+API](https://github.com/docker/distribution/blob/master/docs/spec/api.md),
+with a focus on security and performance.
+
+Following from the Distribution project goals above, we have a set of goals
+for registry v2 that we would like to follow in the design. New features
+should be compared against these goals.
+
+#### Data Storage and Distribution First
+
+The registry's first goal is to provide a reliable, consistent storage
+location for Docker images. The registry should only provide the minimal
+amount of indexing required to fetch image data and no more.
+
+This means we should be selective in new features and API additions, including
+those that may require expensive, ever growing indexes. Requests should be
+servable in "constant time".
+
+#### Content Addressability
+
+All data objects used in the registry API should be content addressable.
+Content identifiers should be secure and verifiable. This provides a secure,
+reliable base from which to build more advanced content distribution systems.
+
+#### Content Agnostic
+
+In the past, changes to the image format would require large changes in Docker
+and the Registry. By decoupling the distribution and image format, we can
+allow the formats to progress without having to coordinate between the two.
+This means that we should be focused on decoupling Docker from the registry
+just as much as decoupling the registry from Docker. Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses, and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality, keeping
+the core scope narrow while still allowing new capabilities to be layered on.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigorous design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but is restricted from
+within the docker client to only mirror the official Docker Hub.
+This functionality can be expanded when image provenance has been specified
+and implemented in the distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend. While this is a big win for simplicity and reliably maintaining
+state, it comes at the cost of consistency and higher latency. The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID compliant
+storage systems to handle metadata.
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding on
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with the existing
+tools and propose it as a standard search API, or use it to inform a
+standardization process. Once this has been explored, we will integrate with
+the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much asked for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal, but ideally both are supported. Generally, according to this rule, we
+err on holding data longer than needed, ensuring that it is only removed when
+we can be certain that it can be removed. With the current behavior, we opt to
+hold onto the data forever, ensuring that data cannot be incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers.
+This is mitigated by writing values in reverse-dependent order, but makes
+wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed, acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered (a mark-and-sweep
+sketch follows this list):
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: 1. maintaining a consistent consensus
+  of reference counts across a set of Registries and 2. building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments. This would create a bottleneck for
+  registry metadata. However, metadata is generally not the main bottleneck
+  when serving images.
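+To make the "lock the world" variant concrete, here is a minimal
+mark-and-sweep sketch in Go. This is an editorial illustration only: the
+types are hypothetical stand-ins, not the real distribution APIs, and it
+assumes writes are halted while the pass runs:
+
+```go
+package main
+
+import "fmt"
+
+// Digest and Manifest are simplified stand-ins for the real types.
+type Digest string
+
+type Manifest struct {
+	Blobs []Digest // config and layer blobs referenced by this manifest
+}
+
+// mark walks every manifest and records each reachable blob.
+func mark(manifests []Manifest) map[Digest]bool {
+	marked := make(map[Digest]bool)
+	for _, m := range manifests {
+		for _, b := range m.Blobs {
+			marked[b] = true
+		}
+	}
+	return marked
+}
+
+// sweep returns blobs referenced by no manifest; these are deletion
+// candidates. This is only safe while writes are disabled, for the
+// concurrency reasons described above.
+func sweep(all []Digest, marked map[Digest]bool) []Digest {
+	var unreferenced []Digest
+	for _, b := range all {
+		if !marked[b] {
+			unreferenced = append(unreferenced, b)
+		}
+	}
+	return unreferenced
+}
+
+func main() {
+	manifests := []Manifest{{Blobs: []Digest{"sha256:aa", "sha256:bb"}}}
+	all := []Digest{"sha256:aa", "sha256:bb", "sha256:cc"}
+	fmt.Println(sweep(all, mark(manifests))) // [sha256:cc]
+}
+```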
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep based solution may seem simple but the amount of work in
+coordination may offset the extra work it would take to build a _Centralized
+Oracle_. We'll accept proposals for any solution but please coordinate with us
+before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take care to vendor the dependent version.
+
+For feature additions, please see the Registry section. In the future, we may break out a
+separate Roadmap for distribution-specific features that apply to more than
+just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
+
diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go
new file mode 100644
index 0000000000..1f91ae21e9
--- /dev/null
+++ b/vendor/github.com/docker/distribution/blobs.go
@@ -0,0 +1,257 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+	"github.com/docker/distribution/reference"
+)
+
+var (
+	// ErrBlobExists returned when blob already exists.
+	ErrBlobExists = errors.New("blob exists")
+
+	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
+	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")
+
+	// ErrBlobUnknown when blob is not found.
+	ErrBlobUnknown = errors.New("unknown blob")
+
+	// ErrBlobUploadUnknown returned when upload is not found.
+	ErrBlobUploadUnknown = errors.New("blob upload unknown")
+
+	// ErrBlobInvalidLength returned when the blob's length on commit does
+	// not match the descriptor or is an invalid value.
+	ErrBlobInvalidLength = errors.New("blob invalid length")
+)
+
+// ErrBlobInvalidDigest returned when digest check fails.
+type ErrBlobInvalidDigest struct {
+	Digest digest.Digest
+	Reason error
+}
+
+func (err ErrBlobInvalidDigest) Error() string {
+	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
+		err.Digest, err.Reason)
+}
+
+// ErrBlobMounted returned when a blob is mounted from another repository
+// instead of initiating an upload session.
+type ErrBlobMounted struct {
+	From       reference.Canonical
+	Descriptor Descriptor
+}
+
+func (err ErrBlobMounted) Error() string {
+	return fmt.Sprintf("blob mounted from: %v to: %v",
+		err.From, err.Descriptor)
+}
+
+// Descriptor describes targeted content. Used in conjunction with a blob
+// store, a descriptor can be used to fetch, store and target any kind of
+// blob. The struct also describes the wire protocol format. Fields should
+// only be added but never changed.
+type Descriptor struct {
+	// MediaType describes the type of the content. All text based formats are
+	// encoded as utf-8.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Size in bytes of content.
+	Size int64 `json:"size,omitempty"`
+
+	// Digest uniquely identifies the content. A byte stream can be verified
+	// against this digest.
+	Digest digest.Digest `json:"digest,omitempty"`
+
+	// URLs contains the source URLs of this content.
+	URLs []string `json:"urls,omitempty"`
+
+	// NOTE: Before adding a field here, please ensure that all
+	// other options have been exhausted. Much of the type relationships
+	// depend on the simplicity of this type.
+}
+
+// Descriptor returns the descriptor, to make it satisfy the Describable
+// interface. Note that implementations of Describable are generally objects
+// which can be described, not simply descriptors; this exception is in place
+// to make it more convenient to pass actual descriptors to functions that
+// expect Describable objects.
+func (d Descriptor) Descriptor() Descriptor {
+	return d
+}
+
+// BlobStatter makes blob descriptors available by digest. The service may
+// provide a descriptor of a different digest if the provided digest is not
+// canonical.
+type BlobStatter interface {
+	// Stat provides metadata about a blob identified by the digest. If the
+	// blob is unknown to the describer, ErrBlobUnknown will be returned.
+	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
+}
+
+// BlobDeleter enables deleting blobs from storage.
+type BlobDeleter interface {
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// BlobEnumerator enables iterating over blobs from storage.
+type BlobEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
+}
+
+// BlobDescriptorService manages metadata about a blob by digest. Most
+// implementations will not expose such an interface explicitly. Such mappings
+// should be maintained by interacting with the BlobIngester. Hence, this is
+// left off of BlobService and BlobStore.
+type BlobDescriptorService interface {
+	BlobStatter
+
+	// SetDescriptor assigns the descriptor to the digest. The provided digest and
+	// the digest in the descriptor must map to identical content but they may
+	// differ on their algorithm. The descriptor must have the canonical
+	// digest of the content and the digest algorithm must match the
+	// annotator's canonical algorithm.
+	//
+	// Such a facility can be used to map blobs between digest domains, with
+	// the restriction that the algorithm of the descriptor must match the
+	// canonical algorithm (i.e. sha256) of the annotator.
+	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error
+
+	// Clear enables descriptors to be unlinked.
+	Clear(ctx context.Context, dgst digest.Digest) error
+}
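+// Editor's note: the following is an illustrative sketch, not upstream code.
+// A minimal in-memory BlobStatter satisfying the interface above might look
+// like this:
+//
+//	type mapStatter map[digest.Digest]Descriptor
+//
+//	func (m mapStatter) Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) {
+//		if desc, ok := m[dgst]; ok {
+//			return desc, nil
+//		}
+//		return Descriptor{}, ErrBlobUnknown
+//	}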
+// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
+type BlobDescriptorServiceFactory interface {
+	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
+}
+
+// ReadSeekCloser is the primary reader type for blob data, combining
+// io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// BlobProvider describes operations for getting blob data.
+type BlobProvider interface {
+	// Get returns the entire blob identified by digest along with the descriptor.
+	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)
+
+	// Open provides a ReadSeekCloser to the blob identified by the provided
+	// descriptor. If the blob is not known to the service, an error will be
+	// returned.
+	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
+}
+
+// BlobServer can serve blobs via http.
+type BlobServer interface {
+	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
+	// service may decide to redirect the client elsewhere or serve the data
+	// directly.
+	//
+	// This handler only issues successful responses, such as 2xx or 3xx,
+	// meaning it serves data or issues a redirect. If the blob is not
+	// available, an error will be returned and the caller may still issue a
+	// response.
+	//
+	// The implementation may serve the same blob from a different digest
+	// domain. The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+		// Stat allows passing a precalculated descriptor to link and return.
+		// Blob access check will be skipped if set.
+		Stat *Descriptor
+	}
+}
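+// Editor's note: an illustrative sketch, not upstream code. Given some
+// ingester of type BlobIngester, the typical upload flow through the
+// BlobWriter defined below is Create, then Write, then Commit:
+//
+//	bw, err := ingester.Create(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if _, err := bw.Write(p); err != nil {
+//		return err
+//	}
+//	desc, err := bw.Commit(ctx, Descriptor{
+//		MediaType: "application/octet-stream",
+//		Size:      int64(len(p)),
+//		Digest:    digest.FromBytes(p),
+//	})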
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteCloser
+	io.ReaderFrom
+
+	// Size returns the number of bytes written to this blob.
+	Size() int64
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign
+	// "application/octet-stream" to the blob. The returned descriptor may
+	// have a different digest depending on the blob store, referred to as
+	// the canonical descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls even after a commit that
+	// result in a no-op. This allows use of Cancel in a defer statement,
+	// increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobDeleter
+}
diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml
new file mode 100644
index 0000000000..61f8be0cb5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/circle.yml
@@ -0,0 +1,93 @@
+# Pony-up!
+machine:
+  pre:
+  # Install gvm
+    - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer)
+  # Install codecov for coverage
+    - pip install --user codecov
+
+  post:
+  # go
+    - gvm install go1.7 --prefer-binary --name=stable
+
+  environment:
+  # Convenient shortcuts to "common" locations
+    CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
+    BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
+  # Trick circle brainflat "no absolute path" behavior
+    BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
+    DOCKER_BUILDTAGS: "include_oss include_gcs"
+  # Workaround Circle parsing dumb bugs and/or YAML wonkyness
+    CIRCLE_PAIN: "mode: set"
+
+  hosts:
+  # Not used yet
+    fancy: 127.0.0.1
+
+dependencies:
+  pre:
+  # Copy the code to the gopath of all go versions
+    - >
+      gvm use stable &&
+      mkdir -p "$(dirname $BASE_STABLE)" &&
+      cp -R "$CHECKOUT" "$BASE_STABLE"
+
+  override:
+  # Install dependencies for every copied clone/go version
+    - gvm use stable && go get github.com/tools/godep:
+        pwd: $BASE_STABLE
+
+  post:
+  # For the stable go version, additionally install linting tools
+    - >
+      gvm use stable &&
+      go get github.com/axw/gocov/gocov github.com/golang/lint/golint
+
+test:
+  pre:
+  # Output the go versions we are going to test
+    # - gvm use old && go version
+    - gvm use stable && go version
+
+    # todo(richard): replace with a more robust vendoring solution. Removed due to a fundamental disagreement in godep philosophies.
+    # Ensure validation of dependencies
+    # - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi:
+    #     pwd: $BASE_STABLE
+
+    # First thing: build everything. This will catch compile errors, and it's
+    # also necessary for go vet to work properly (see #807).
+    - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"):
+        pwd: $BASE_STABLE
+
+    # FMT
+    - gvm use stable && make fmt:
+        pwd: $BASE_STABLE
+
+    # VET
+    - gvm use stable && make vet:
+        pwd: $BASE_STABLE
+
+    # LINT
+    - gvm use stable && make lint:
+        pwd: $BASE_STABLE
+
+  override:
+    # Test stable, and report
+    - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE':
+        timeout: 1000
+        pwd: $BASE_STABLE
+
+    # Test stable with race
+    - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | grep -v "registry/handlers" | grep -v "registry/storage/driver" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -race -tags "$DOCKER_BUILDTAGS" -test.short $PACKAGE':
+        timeout: 1000
+        pwd: $BASE_STABLE
+  post:
+    # Report to codecov
+    - bash <(curl -s https://codecov.io/bash):
+        pwd: $BASE_STABLE
+
+  ## Notes
+  # Do we want these as well?
+  # - go get code.google.com/p/go.tools/cmd/goimports
+  # - test -z "$(goimports -l -w ./... | tee /dev/stderr)"
+  # http://labix.org/gocheck
diff --git a/vendor/github.com/docker/distribution/coverpkg.sh b/vendor/github.com/docker/distribution/coverpkg.sh
new file mode 100755
index 0000000000..25d419ae82
--- /dev/null
+++ b/vendor/github.com/docker/distribution/coverpkg.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+# Given a subpackage and the containing package, figures out which packages
+# need to be passed to `go test -coverpkg`: this includes all of the
+# subpackage's dependencies within the containing package, as well as the
+# subpackage itself.
+DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)"
+echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ','
diff --git a/vendor/github.com/docker/distribution/digest/digest.go b/vendor/github.com/docker/distribution/digest/digest.go
new file mode 100644
index 0000000000..31d821bba7
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/digest.go
@@ -0,0 +1,139 @@
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+const (
+	// DigestSha256EmptyTar is the canonical sha256 digest of empty data
+	DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
+)
+
+// Digest allows simple protection of hex-formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows abstracting the digest behind this type and working only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions. This is also useful for rebuilding digests from binary
+// serializations.
+func NewDigestFromBytes(alg Algorithm, p []byte) Digest {
+	return Digest(fmt.Sprintf("%s:%x", alg, p))
+}
+
+// NewDigestFromHex returns a Digest from alg and the hex-encoded digest.
+func NewDigestFromHex(alg, hex string) Digest {
+	return Digest(fmt.Sprintf("%s:%s", alg, hex))
+}
+
+// DigestRegexp matches valid digest types.
+var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`)
+
+// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match.
+var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`)
+
+var (
+	// ErrDigestInvalidFormat returned when digest format invalid.
+	ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
+
+	// ErrDigestInvalidLength returned when digest has invalid length.
+	ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
+
+	// ErrDigestUnsupported returned when the digest algorithm is unsupported.
+	ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
+)
+
+// ParseDigest parses s and returns the validated digest object. An error will
+// be returned if the format is invalid.
+func ParseDigest(s string) (Digest, error) {
+	d := Digest(s)
+
+	return d, d.Validate()
+}
+
+// FromReader returns the most valid digest for the underlying content using
+// the canonical digest algorithm.
+func FromReader(rd io.Reader) (Digest, error) {
+	return Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) Digest {
+	return Canonical.FromBytes(p)
+}
+
+// Validate checks that the contents of d are a valid digest, returning an
+// error if not.
+func (d Digest) Validate() error {
+	s := string(d)
+
+	if !DigestRegexpAnchored.MatchString(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	i := strings.Index(s, ":")
+	if i < 0 {
+		return ErrDigestInvalidFormat
+	}
+
+	// case: "sha256:" with no hex.
+	if i+1 == len(s) {
+		return ErrDigestInvalidFormat
+	}
+
+	switch algorithm := Algorithm(s[:i]); algorithm {
+	case SHA256, SHA384, SHA512:
+		if algorithm.Size()*2 != len(s[i+1:]) {
+			return ErrDigestInvalidLength
+		}
+		break
+	default:
+		return ErrDigestUnsupported
+	}
+
+	return nil
+}
+
+// Algorithm returns the algorithm portion of the digest. This will panic if
+// the underlying digest is not in a valid format.
+func (d Digest) Algorithm() Algorithm {
+	return Algorithm(d[:d.sepIndex()])
+}
+
+// Hex returns the hex digest portion of the digest. This will panic if the
+// underlying digest is not in a valid format.
+func (d Digest) Hex() string {
+	return string(d[d.sepIndex()+1:])
+}
+
+func (d Digest) String() string {
+	return string(d)
+}
+
+func (d Digest) sepIndex() int {
+	i := strings.Index(string(d), ":")
+
+	if i < 0 {
+		panic("could not find ':' in digest: " + d)
+	}
+
+	return i
+}
diff --git a/vendor/github.com/docker/distribution/digest/digest_test.go b/vendor/github.com/docker/distribution/digest/digest_test.go
new file mode 100644
index 0000000000..afb4ebf632
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/digest_test.go
@@ -0,0 +1,82 @@
+package digest
+
+import (
+	"testing"
+)
+
+func TestParseDigest(t *testing.T) {
+	for _, testcase := range []struct {
+		input     string
+		err       error
+		algorithm Algorithm
+		hex       string
+	}{
+		{
+			input:     "sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
+			algorithm: "sha256",
+			hex:       "e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b",
+		},
+		{
+			input:     "sha384:d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
+			algorithm: "sha384",
+			hex:       "d3fc7881460b7e22e3d172954463dddd7866d17597e7248453c48b3e9d26d9596bf9c4a9cf8072c9d5bad76e19af801d",
+		},
+		{
+			// empty hex
+			input: "sha256:",
+			err:   ErrDigestInvalidFormat,
+		},
+		{
+			// just hex
+			input: "d41d8cd98f00b204e9800998ecf8427e",
+			err:   ErrDigestInvalidFormat,
+		},
+		{
+			// not hex
+			input: "sha256:d41d8cd98f00b204e9800m98ecf8427e",
+			err:   ErrDigestInvalidFormat,
+		},
+		{
+			// too short
+			input: "sha256:abcdef0123456789",
+			err:   ErrDigestInvalidLength,
+		},
+		{
+			// too short (from different algorithm)
+			input: "sha512:abcdef0123456789abcdef0123456789abcdef0123456789abcdef0123456789",
+			err:   ErrDigestInvalidLength,
+		},
+		{
+			input: "foo:d41d8cd98f00b204e9800998ecf8427e",
+			err:   ErrDigestUnsupported,
+		},
+	} {
+		digest, err := ParseDigest(testcase.input)
+		if err != testcase.err {
+			t.Fatalf("error differed from expected while parsing %q: %v != %v", testcase.input, err, testcase.err)
+		}
+
+		if testcase.err != nil {
+			continue
+		}
+
+		if digest.Algorithm() != testcase.algorithm {
+			t.Fatalf("incorrect algorithm for parsed digest: %q != %q", digest.Algorithm(), testcase.algorithm)
+		}
+
+		if digest.Hex() != testcase.hex {
+			t.Fatalf("incorrect hex for parsed digest: %q != %q", digest.Hex(), testcase.hex)
+		}
+
+		// Parse string return value and check equality
+		newParsed, err := ParseDigest(digest.String())
+
+		if err != nil {
+			t.Fatalf("unexpected error parsing input %q: %v", testcase.input, err)
+		}
+
+		if newParsed != digest {
+			t.Fatalf("expected equal: %q != %q", newParsed, digest)
+		}
+	}
+}
diff --git a/vendor/github.com/docker/distribution/digest/digester.go b/vendor/github.com/docker/distribution/digest/digester.go
new file mode 100644
index 0000000000..f3105a45b6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/digester.go
@@ -0,0 +1,155 @@
+package digest
+
+import (
+	"crypto"
+	"fmt"
+	"hash"
+	"io"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
+type Algorithm string
+
+// supported digest types
+const (
+	SHA256 Algorithm = "sha256" // sha256 with hex encoding
+	SHA384 Algorithm = "sha384" // sha384 with hex encoding
+	SHA512 Algorithm = "sha512" // sha512 with hex encoding
+
+	// Canonical is the primary digest algorithm used with the distribution
+	// project. Other digests may be used but this one is the primary storage
+	// digest.
+	Canonical = SHA256
+)
+
+var (
+	// TODO(stevvooe): Follow the pattern of the standard crypto package for
+	// registration of digests. Effectively, we are a registerable set and
+	// common symbol access.
+
+	// algorithms maps values to hash.Hash implementations. Other algorithms
+	// may be available but they cannot be calculated by the digest package.
+	algorithms = map[Algorithm]crypto.Hash{
+		SHA256: crypto.SHA256,
+		SHA384: crypto.SHA384,
+		SHA512: crypto.SHA512,
+	}
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, New and Hash should not be called.
+func (a Algorithm) Available() bool {
+	h, ok := algorithms[a]
+	if !ok {
+		return false
+	}
+
+	// check availability of the hash, as well
+	return h.Available()
+}
+
+func (a Algorithm) String() string {
+	return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+	h, ok := algorithms[a]
+	if !ok {
+		return 0
+	}
+	return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+	if value == "" {
+		*a = Canonical
+	} else {
+		// just do a type conversion, support is queried with Available.
+		*a = Algorithm(value)
+	}
+
+	return nil
+}
+
+// New returns a new digester for the specified algorithm. Check Available
+// before calling New: for an unavailable algorithm, the underlying call to
+// Hash will panic.
+func (a Algorithm) New() Digester {
+	return &digester{
+		alg:  a,
+		hash: a.Hash(),
+	}
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+	if !a.Available() {
+		// NOTE(stevvooe): A missing hash is usually a programming error that
+		// must be resolved at compile time. We don't import in the digest
+		// package to allow users to choose their hash implementation (such as
+		// when using stevvooe/resumable or a hardware accelerated package).
+		//
+		// Applications that may want to resolve the hash at runtime should
+		// call Algorithm.Available before calling Algorithm.Hash().
+		panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+	}
+
+	return algorithms[a].New()
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.New()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.New()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// TODO(stevvooe): Allow resolution of verifiers using the digest type and
+// this registration system.
+
+// Digester calculates the digest of written data. Writes should go directly
+// to the return value of Hash, while calling Digest will return the current
+// value of the digest.
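+//
+// For example (a minimal sketch; the input bytes are arbitrary):
+//
+//	d := Canonical.New()
+//	d.Hash().Write([]byte("some data"))
+//	dgst := d.Digest() // e.g. "sha256:..."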
+type Digester interface {
+	Hash() hash.Hash // provides direct access to the underlying hash instance.
+	Digest() Digest
+}
+
+// digester provides a simple digester definition that embeds a hasher.
+type digester struct {
+	alg  Algorithm
+	hash hash.Hash
+}
+
+func (d *digester) Hash() hash.Hash {
+	return d.hash
+}
+
+func (d *digester) Digest() Digest {
+	return NewDigest(d.alg, d.hash)
+}
diff --git a/vendor/github.com/docker/distribution/digest/digester_resumable_test.go b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go
new file mode 100644
index 0000000000..6ba21c801a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/digester_resumable_test.go
@@ -0,0 +1,21 @@
+// +build !noresumabledigest
+
+package digest
+
+import (
+	"testing"
+
+	"github.com/stevvooe/resumable"
+	_ "github.com/stevvooe/resumable/sha256"
+)
+
+// TestResumableDetection just ensures that the resumable capability of a hash
+// is exposed through the digester type, which is just a hash plus a Digest
+// method.
+func TestResumableDetection(t *testing.T) {
+	d := Canonical.New()
+
+	if _, ok := d.Hash().(resumable.Hash); !ok {
+		t.Fatalf("expected digester to implement resumable.Hash: %#v, %v", d, d.Hash())
+	}
+}
diff --git a/vendor/github.com/docker/distribution/digest/doc.go b/vendor/github.com/docker/distribution/digest/doc.go
new file mode 100644
index 0000000000..f64b0db32b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/doc.go
@@ -0,0 +1,42 @@
+// Package digest provides a generalized type to opaquely represent message
+// digests and their operations within the registry. The Digest type is
+// designed to serve as a flexible identifier in a content-addressable system.
+// More importantly, it provides tools and wrappers to work with
+// hash.Hash-based digests with little effort.
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+//	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// In this case, the string "sha256" is the algorithm and the hex bytes are
+// the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
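+//
+// Usage
+//
+// A short sketch of computing and comparing digests (the inputs are
+// arbitrary):
+//
+//	dgst := digest.FromBytes([]byte("hello world"))
+//	same := dgst == digest.FromBytes([]byte("hello world")) // true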
+//
+package digest
diff --git a/vendor/github.com/docker/distribution/digest/set.go b/vendor/github.com/docker/distribution/digest/set.go
new file mode 100644
index 0000000000..4b9313c1ae
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digest/set.go
@@ -0,0 +1,245 @@
+package digest
+
+import (
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+)
+
+var (
+	// ErrDigestNotFound is used when a matching digest
+	// could not be found in a set.
+	ErrDigestNotFound = errors.New("digest not found")
+
+	// ErrDigestAmbiguous is used when multiple digests
+	// are found in a set. None of the matching digests
+	// should be considered valid matches.
+	ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string representation
+// of the digest as well as a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected, therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+	mutex   sync.RWMutex
+	entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+	return &Set{
+		entries: digestEntries{},
+	}
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool {
+	if len(hex) == len(shortHex) {
+		if hex != shortHex {
+			return false
+		}
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
+			return false
+		}
+	} else if !strings.HasPrefix(hex, shortHex) {
+		return false
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
+		return false
+	}
+	return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digests could be found ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found
+// ErrDigestAmbiguous will be returned with an empty digest value.
+func (dst *Set) Lookup(d string) (Digest, error) {
+	dst.mutex.RLock()
+	defer dst.mutex.RUnlock()
+	if len(dst.entries) == 0 {
+		return "", ErrDigestNotFound
+	}
+	var (
+		searchFunc func(int) bool
+		alg        Algorithm
+		hex        string
+	)
+	dgst, err := ParseDigest(d)
+	if err == ErrDigestInvalidFormat {
+		hex = d
+		searchFunc = func(i int) bool {
+			return dst.entries[i].val >= d
+		}
+	} else {
+		hex = dgst.Hex()
+		alg = dgst.Algorithm()
+		searchFunc = func(i int) bool {
+			if dst.entries[i].val == hex {
+				return dst.entries[i].alg >= alg
+			}
+			return dst.entries[i].val >= hex
+		}
+	}
+	idx := sort.Search(len(dst.entries), searchFunc)
+	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
+		return "", ErrDigestNotFound
+	}
+	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
+		return dst.entries[idx].digest, nil
+	}
+	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
+		return "", ErrDigestAmbiguous
+	}
+
+	return dst.entries[idx].digest, nil
+}
+
+// Add adds the given digest to the set. An error will be returned
+// if the given digest is invalid.
+// If the digest already exists in the set, this operation will be a no-op.
+func (dst *Set) Add(d Digest) error {
+	if err := d.Validate(); err != nil {
+		return err
+	}
+	dst.mutex.Lock()
+	defer dst.mutex.Unlock()
+	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+	searchFunc := func(i int) bool {
+		if dst.entries[i].val == entry.val {
+			return dst.entries[i].alg >= entry.alg
+		}
+		return dst.entries[i].val >= entry.val
+	}
+	idx := sort.Search(len(dst.entries), searchFunc)
+	if idx == len(dst.entries) {
+		dst.entries = append(dst.entries, entry)
+		return nil
+	} else if dst.entries[idx].digest == d {
+		return nil
+	}
+
+	entries := append(dst.entries, nil)
+	copy(entries[idx+1:], entries[idx:len(entries)-1])
+	entries[idx] = entry
+	dst.entries = entries
+	return nil
+}
+
+// Remove removes the given digest from the set. An error will be
+// returned if the given digest is invalid. If the digest does
+// not exist in the set, this operation will be a no-op.
+func (dst *Set) Remove(d Digest) error {
+	if err := d.Validate(); err != nil {
+		return err
+	}
+	dst.mutex.Lock()
+	defer dst.mutex.Unlock()
+	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
+	searchFunc := func(i int) bool {
+		if dst.entries[i].val == entry.val {
+			return dst.entries[i].alg >= entry.alg
+		}
+		return dst.entries[i].val >= entry.val
+	}
+	idx := sort.Search(len(dst.entries), searchFunc)
+	// Not found if idx is after or value at idx is not digest
+	if idx == len(dst.entries) || dst.entries[idx].digest != d {
+		return nil
+	}
+
+	entries := dst.entries
+	copy(entries[idx:], entries[idx+1:])
+	entries = entries[:len(entries)-1]
+	dst.entries = entries
+
+	return nil
+}
+
+// All returns all the digests in the set
+func (dst *Set) All() []Digest {
+	dst.mutex.RLock()
+	defer dst.mutex.RUnlock()
+	retValues := make([]Digest, len(dst.entries))
+	for i := range dst.entries {
+		retValues[i] = dst.entries[i].digest
+	}
+
+	return retValues
+}
+
+// ShortCodeTable returns a map of Digest to unique short codes. The
+// length parameter is the minimum code length; a code may extend up to
+// the full digest value when uniqueness cannot be achieved with fewer
+// characters. This function attempts to make short codes as short as
+// possible while keeping them unique.
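+//
+// For example (a sketch, digests abbreviated): for two digests whose hex
+// values begin "1234a..." and "1234b...", ShortCodeTable(dst, 2) must grow
+// both codes to "1234a" and "1234b" before they become unique, while an
+// unrelated "9f31..." digest keeps the minimum-length code "9f".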
+func ShortCodeTable(dst *Set, length int) map[Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg Algorithm + val string + digest Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/digest/set_test.go b/vendor/github.com/docker/distribution/digest/set_test.go new file mode 100644 index 0000000000..e9dab8795a --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/set_test.go @@ -0,0 +1,368 @@ +package digest + +import ( + "crypto/sha256" + "encoding/binary" + "math/rand" + "testing" +) + +func assertEqualDigests(t *testing.T, d1, d2 Digest) { + if d1 != d2 { + t.Fatalf("Digests do not match:\n\tActual: %s\n\tExpected: %s", d1, d2) + } +} + +func TestLookup(t *testing.T) { + digests := []Digest{ + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup("54") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[3]) + + dgst, err = dset.Lookup("1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("9876") + if err == nil { + t.Fatal("Expected ambiguous error looking up: 9876") + } + if err != ErrDigestNotFound { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:1234") + if err == nil { + t.Fatal("Expected ambiguous error looking up: sha256:1234") + } + if err != ErrDigestAmbiguous { + t.Fatal(err) + } + + dgst, err = dset.Lookup("sha256:12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[0]) + + dgst, err = dset.Lookup("sha256:12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12346") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, digests[2]) + + dgst, err = dset.Lookup("12345") + if err != nil { + t.Fatal(err) + } + assertEqualDigests(t, dgst, 
digests[0]) +} + +func TestAddDuplication(t *testing.T) { + digests := []Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + "sha512:65431111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65421111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + "sha512:65321111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + if len(dset.entries) != 8 { + t.Fatal("Invalid dset size") + } + + if err := dset.Add(Digest("sha256:1234511111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 8 { + t.Fatal("Duplicate digest insert allowed") + } + + if err := dset.Add(Digest("sha384:123451111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111")); err != nil { + t.Fatal(err) + } + + if len(dset.entries) != 9 { + t.Fatal("Insert with different algorithm not allowed") + } +} + +func TestRemove(t *testing.T) { + digests, err := createDigests(10) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dgst, err := dset.Lookup(digests[0].String()) + if err != nil { + t.Fatal(err) + } + if dgst != digests[0] { + t.Fatalf("Unexpected digest value:\n\tExpected: %s\n\tActual: %s", digests[0], dgst) + } + + if err := dset.Remove(digests[0]); err != nil { + t.Fatal(err) + } + + if _, err := dset.Lookup(digests[0].String()); err != ErrDigestNotFound { + t.Fatalf("Expected error %v when looking up removed digest, got %v", ErrDigestNotFound, err) + } +} + +func TestAll(t *testing.T) { + digests, err := createDigests(100) + if err != nil { + t.Fatal(err) + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + all := map[Digest]struct{}{} + for _, dgst := range dset.All() { + all[dgst] = struct{}{} + } + + if len(all) != len(digests) { + t.Fatalf("Unexpected number of unique digests found:\n\tExpected: %d\n\tActual: %d", len(digests), len(all)) + } + + for i, dgst := range digests { + if _, ok := all[dgst]; !ok { + t.Fatalf("Missing element at position %d: %s", i, dgst) + } + } + +} + +func assertEqualShort(t *testing.T, actual, expected string) { + if actual != expected { + t.Fatalf("Unexpected short value:\n\tExpected: %s\n\tActual: %s", expected, actual) + } +} + +func TestShortCodeTable(t *testing.T) { + digests := []Digest{ + "sha256:1234111111111111111111111111111111111111111111111111111111111111", + "sha256:1234511111111111111111111111111111111111111111111111111111111111", + "sha256:1234611111111111111111111111111111111111111111111111111111111111", + "sha256:5432111111111111111111111111111111111111111111111111111111111111", + "sha256:6543111111111111111111111111111111111111111111111111111111111111", + 
"sha256:6432111111111111111111111111111111111111111111111111111111111111", + "sha256:6542111111111111111111111111111111111111111111111111111111111111", + "sha256:6532111111111111111111111111111111111111111111111111111111111111", + } + + dset := NewSet() + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + t.Fatal(err) + } + } + + dump := ShortCodeTable(dset, 2) + + if len(dump) < len(digests) { + t.Fatalf("Error unexpected size: %d, expecting %d", len(dump), len(digests)) + } + assertEqualShort(t, dump[digests[0]], "12341") + assertEqualShort(t, dump[digests[1]], "12345") + assertEqualShort(t, dump[digests[2]], "12346") + assertEqualShort(t, dump[digests[3]], "54") + assertEqualShort(t, dump[digests[4]], "6543") + assertEqualShort(t, dump[digests[5]], "64") + assertEqualShort(t, dump[digests[6]], "6542") + assertEqualShort(t, dump[digests[7]], "653") +} + +func createDigests(count int) ([]Digest, error) { + r := rand.New(rand.NewSource(25823)) + digests := make([]Digest, count) + for i := range digests { + h := sha256.New() + if err := binary.Write(h, binary.BigEndian, r.Int63()); err != nil { + return nil, err + } + digests[i] = NewDigest("sha256", h) + } + return digests, nil +} + +func benchAddNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchLookupNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + shorts := make([]string, 0, n) + for _, short := range ShortCodeTable(dset, shortLen) { + shorts = append(shorts, short) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + if _, err = dset.Lookup(shorts[i%n]); err != nil { + b.Fatal(err) + } + } +} + +func benchRemoveNTable(b *testing.B, n int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + b.StopTimer() + for j := range digests { + if err = dset.Add(digests[j]); err != nil { + b.Fatal(err) + } + } + b.StartTimer() + for j := range digests { + if err = dset.Remove(digests[j]); err != nil { + b.Fatal(err) + } + } + } +} + +func benchShortCodeNTable(b *testing.B, n int, shortLen int) { + digests, err := createDigests(n) + if err != nil { + b.Fatal(err) + } + dset := &Set{entries: digestEntries(make([]*digestEntry, 0, n))} + for i := range digests { + if err := dset.Add(digests[i]); err != nil { + b.Fatal(err) + } + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + ShortCodeTable(dset, shortLen) + } +} + +func BenchmarkAdd10(b *testing.B) { + benchAddNTable(b, 10) +} + +func BenchmarkAdd100(b *testing.B) { + benchAddNTable(b, 100) +} + +func BenchmarkAdd1000(b *testing.B) { + benchAddNTable(b, 1000) +} + +func BenchmarkRemove10(b *testing.B) { + benchRemoveNTable(b, 10) +} + +func BenchmarkRemove100(b *testing.B) { + benchRemoveNTable(b, 100) +} + +func BenchmarkRemove1000(b *testing.B) { + benchRemoveNTable(b, 1000) +} + +func BenchmarkLookup10(b *testing.B) { + benchLookupNTable(b, 10, 12) +} + +func BenchmarkLookup100(b *testing.B) { + benchLookupNTable(b, 100, 12) +} + 
+func BenchmarkLookup1000(b *testing.B) { + benchLookupNTable(b, 1000, 12) +} + +func BenchmarkShortCode10(b *testing.B) { + benchShortCodeNTable(b, 10, 12) +} +func BenchmarkShortCode100(b *testing.B) { + benchShortCodeNTable(b, 100, 12) +} +func BenchmarkShortCode1000(b *testing.B) { + benchShortCodeNTable(b, 1000, 12) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers.go b/vendor/github.com/docker/distribution/digest/verifiers.go new file mode 100644 index 0000000000..9af3be1341 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers.go @@ -0,0 +1,44 @@ +package digest + +import ( + "hash" + "io" +) + +// Verifier presents a general verification interface to be used with message +// digests and other byte stream verifications. Users instantiate a Verifier +// from one of the various methods, write the data under test to it then check +// the result with the Verified method. +type Verifier interface { + io.Writer + + // Verified will return true if the content written to Verifier matches + // the digest. + Verified() bool +} + +// NewDigestVerifier returns a verifier that compares the written bytes +// against a passed in digest. +func NewDigestVerifier(d Digest) (Verifier, error) { + if err := d.Validate(); err != nil { + return nil, err + } + + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + }, nil +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/docker/distribution/digest/verifiers_test.go b/vendor/github.com/docker/distribution/digest/verifiers_test.go new file mode 100644 index 0000000000..c342d6e7c8 --- /dev/null +++ b/vendor/github.com/docker/distribution/digest/verifiers_test.go @@ -0,0 +1,49 @@ +package digest + +import ( + "bytes" + "crypto/rand" + "io" + "testing" +) + +func TestDigestVerifier(t *testing.T) { + p := make([]byte, 1<<20) + rand.Read(p) + digest := FromBytes(p) + + verifier, err := NewDigestVerifier(digest) + if err != nil { + t.Fatalf("unexpected error getting digest verifier: %s", err) + } + + io.Copy(verifier, bytes.NewReader(p)) + + if !verifier.Verified() { + t.Fatalf("bytes not verified") + } +} + +// TestVerifierUnsupportedDigest ensures that unsupported digest validation is +// flowing through verifier creation. +func TestVerifierUnsupportedDigest(t *testing.T) { + unsupported := Digest("bean:0123456789abcdef") + + _, err := NewDigestVerifier(unsupported) + if err == nil { + t.Fatalf("expected error when creating verifier") + } + + if err != ErrDigestUnsupported { + t.Fatalf("incorrect error for unsupported digest: %v", err) + } +} + +// TODO(stevvooe): Add benchmarks to measure bytes/second throughput for +// DigestVerifier. +// +// The relevant benchmark for comparison can be run with the following +// commands: +// +// go test -bench . crypto/sha1 +// diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 0000000000..bdd8cb708e --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. 
+// More details are available in the README.md.
+package distribution
diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go
new file mode 100644
index 0000000000..c20f28113c
--- /dev/null
+++ b/vendor/github.com/docker/distribution/errors.go
@@ -0,0 +1,115 @@
+package distribution
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
+// ErrAccessDenied is returned when an access to a requested resource is
+// denied.
+var ErrAccessDenied = errors.New("access denied")
+
+// ErrManifestNotModified is returned when a conditional manifest GetByTag
+// returns nil due to the client indicating it has the latest version.
+var ErrManifestNotModified = errors.New("manifest not modified")
+
+// ErrUnsupported is returned when an unimplemented or unsupported action is
+// performed.
+var ErrUnsupported = errors.New("operation unsupported")
+
+// ErrTagUnknown is returned if the given tag is not known by the tag service.
+type ErrTagUnknown struct {
+	Tag string
+}
+
+func (err ErrTagUnknown) Error() string {
+	return fmt.Sprintf("unknown tag=%s", err.Tag)
+}
+
+// ErrRepositoryUnknown is returned if the named repository is not known by
+// the registry.
+type ErrRepositoryUnknown struct {
+	Name string
+}
+
+func (err ErrRepositoryUnknown) Error() string {
+	return fmt.Sprintf("unknown repository name=%s", err.Name)
+}
+
+// ErrRepositoryNameInvalid should be used to denote an invalid repository
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrRepositoryNameInvalid struct {
+	Name   string
+	Reason error
+}
+
+func (err ErrRepositoryNameInvalid) Error() string {
+	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
+}
+
+// ErrManifestUnknown is returned if the manifest is not known by the
+// registry.
+type ErrManifestUnknown struct {
+	Name string
+	Tag  string
+}
+
+func (err ErrManifestUnknown) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
+}
+
+// ErrManifestUnknownRevision is returned when a manifest cannot be found by
+// revision within a repository.
+type ErrManifestUnknownRevision struct {
+	Name     string
+	Revision digest.Digest
+}
+
+func (err ErrManifestUnknownRevision) Error() string {
+	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
+}
+
+// ErrManifestUnverified is returned when the registry is unable to verify
+// the manifest.
+type ErrManifestUnverified struct{}
+
+func (ErrManifestUnverified) Error() string {
+	return "unverified manifest"
+}
+
+// ErrManifestVerification provides a type to collect errors encountered
+// during manifest verification. Currently, it accepts errors of all types,
+// but it may be narrowed to those involving manifest verification.
+type ErrManifestVerification []error
+
+func (errs ErrManifestVerification) Error() string {
+	var parts []string
+	for _, err := range errs {
+		parts = append(parts, err.Error())
+	}
+
+	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
+}
+
+// ErrManifestBlobUnknown is returned when a referenced blob cannot be found.
+type ErrManifestBlobUnknown struct {
+	Digest digest.Digest
+}
+
+func (err ErrManifestBlobUnknown) Error() string {
+	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
+}
+
+// ErrManifestNameInvalid should be used to denote an invalid manifest
+// name. Reason may be set, indicating the cause of invalidity.
+type ErrManifestNameInvalid struct {
+	Name   string
+	Reason error
+}
+
+func (err ErrManifestNameInvalid) Error() string {
+	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
+}
diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go
new file mode 100644
index 0000000000..c4fb63450b
--- /dev/null
+++ b/vendor/github.com/docker/distribution/manifests.go
@@ -0,0 +1,125 @@
+package distribution
+
+import (
+	"fmt"
+	"mime"
+
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/digest"
+)
+
+// Manifest represents a registry object specifying a set of
+// references and an optional target.
+type Manifest interface {
+	// References returns a list of objects which make up this manifest.
+	// A reference is anything which can be represented by a
+	// distribution.Descriptor. These can consist of layers, resources or other
+	// manifests.
+	//
+	// While no particular order is required, implementations should return
+	// them from highest to lowest priority. For example, one might want to
+	// return the base layer before the top layer.
+	References() []Descriptor
+
+	// Payload provides the serialized format of the manifest, in addition to
+	// the mediatype.
+	Payload() (mediatype string, payload []byte, err error)
+}
+
+// ManifestBuilder creates a manifest allowing one to include dependencies.
+// Instances can be obtained from a version-specific manifest package. Manifest
+// specific data is passed into the function which creates the builder.
+type ManifestBuilder interface {
+	// Build creates the manifest from this builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	//
+	// The destination of the reference is dependent on the manifest type and
+	// the dependency type.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+	// Exists returns true if the manifest exists.
+	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+	// Get retrieves the manifest specified by the given digest.
+	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest.
+	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+	// Delete removes the manifest specified by the given digest. Deleting
+	// a manifest that doesn't exist will return ErrManifestNotFound.
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests.
+type ManifestEnumerator interface {
+	// Enumerate calls ingester for each manifest.
+	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors.
+type Describable interface {
+	Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
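+// The result depends on which manifest schema packages have been imported
+// and registered via RegisterManifestSchema; with no registrations the
+// returned slice is empty.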
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		if t != "" {
+			mediaTypes = append(mediaTypes, t)
+		}
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType.
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType.
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+	// Need to look up by the actual media type, not the raw contents of
+	// the header. Strip semicolons and anything following them.
+	var mediatype string
+	if ctHeader != "" {
+		var err error
+		mediatype, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
+
+	unmarshalFunc, ok := mappings[mediatype]
+	if !ok {
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype)
+		}
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
+// This should be called from specific manifest packages.
+func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error {
+	if _, ok := mappings[mediatype]; ok {
+		return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype)
+	}
+	mappings[mediatype] = u
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go
new file mode 100644
index 0000000000..02786628e8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/reference.go
@@ -0,0 +1,370 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+//	reference                  := name [ ":" tag ] [ "@" digest ]
+//	name                       := [hostname '/'] component ['/' component]*
+//	hostname                   := hostcomponent ['.' hostcomponent]* [':' port-number]
+//	hostcomponent              := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+//	port-number                := /[0-9]+/
+//	component                  := alpha-numeric [separator alpha-numeric]*
+//	alpha-numeric              := /[a-z0-9]+/
+//	separator                  := /[_.]|__|[-]*/
+//
+//	tag                        := /[\w][\w.-]{0,127}/
+//
+//	digest                     := digest-algorithm ":" digest-hex
+//	digest-algorithm           := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+//	digest-algorithm-separator := /[+.-_]/
+//	digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+//	digest-hex                 := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"path"
+	"strings"
+
+	"github.com/docker/distribution/digest"
+)
+
+const (
+	// NameTotalLengthMax is the maximum total number of characters in a repository name.
+	NameTotalLengthMax = 255
+)
+
+var (
+	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name.
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag.
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced.
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including hostname and digest.
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name.
+func SplitHostname(named Named) (string, string) {
+	name := named.Name()
+	match := anchoredNameRegexp.FindStringSubmatch(name)
+	if len(match) != 3 {
+		return "", name
+	}
+	return match[1], match[2]
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
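+//
+// A sketch of typical results (inputs are illustrative):
+//
+//	Parse("example.com:5000/repo")    // a Named repository reference
+//	Parse("example.com:5000/repo:v1") // a NamedTagged reference
+//	Parse("Uppercase:tag")            // nil, ErrNameContainsUppercase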
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + ref := reference{ + name: matches[1], + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.ParseDigest(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name, otherwise an error is +// returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + ref, err := Parse(s) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + if !anchoredNameRegexp.MatchString(name) { + return nil, ErrReferenceInvalidFormat + } + return repository(name), nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + name: name.Name(), + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + name: name.Name(), + digest: digest, + }, nil +} + +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. 
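+//
+// For example (a sketch), a reference parsed from "example.com/foo:latest" is
+// trimmed to a Named for just "example.com/foo".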
+func TrimNamed(ref Named) Named {
+	return repository(ref.Name())
+}
+
+func getBestReferenceType(ref reference) Reference {
+	if ref.name == "" {
+		// Allow digest only references
+		if ref.digest != "" {
+			return digestReference(ref.digest)
+		}
+		return nil
+	}
+	if ref.tag == "" {
+		if ref.digest != "" {
+			return canonicalReference{
+				name:   ref.name,
+				digest: ref.digest,
+			}
+		}
+		return repository(ref.name)
+	}
+	if ref.digest == "" {
+		return taggedReference{
+			name: ref.name,
+			tag:  ref.tag,
+		}
+	}
+
+	return ref
+}
+
+type reference struct {
+	name   string
+	tag    string
+	digest digest.Digest
+}
+
+func (r reference) String() string {
+	return r.name + ":" + r.tag + "@" + r.digest.String()
+}
+
+func (r reference) Name() string {
+	return r.name
+}
+
+func (r reference) Tag() string {
+	return r.tag
+}
+
+func (r reference) Digest() digest.Digest {
+	return r.digest
+}
+
+type repository string
+
+func (r repository) String() string {
+	return string(r)
+}
+
+func (r repository) Name() string {
+	return string(r)
+}
+
+type digestReference digest.Digest
+
+func (d digestReference) String() string {
+	return string(d)
+}
+
+func (d digestReference) Digest() digest.Digest {
+	return digest.Digest(d)
+}
+
+type taggedReference struct {
+	name string
+	tag  string
+}
+
+func (t taggedReference) String() string {
+	return t.name + ":" + t.tag
+}
+
+func (t taggedReference) Name() string {
+	return t.name
+}
+
+func (t taggedReference) Tag() string {
+	return t.tag
+}
+
+type canonicalReference struct {
+	name   string
+	digest digest.Digest
+}
+
+func (c canonicalReference) String() string {
+	return c.name + "@" + c.digest.String()
+}
+
+func (c canonicalReference) Name() string {
+	return c.name
+}
+
+func (c canonicalReference) Digest() digest.Digest {
+	return c.digest
+}
diff --git a/vendor/github.com/docker/distribution/reference/reference_test.go b/vendor/github.com/docker/distribution/reference/reference_test.go
new file mode 100644
index 0000000000..405c47c050
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/reference_test.go
@@ -0,0 +1,661 @@
+package reference
+
+import (
+	"encoding/json"
+	"strconv"
+	"strings"
+	"testing"
+
+	"github.com/docker/distribution/digest"
+)
+
+func TestReferenceParse(t *testing.T) {
+	// referenceTestcases is a unified set of testcases for
+	// testing the parsing of references
+	referenceTestcases := []struct {
+		// input is the repository name or name component testcase
+		input string
+		// err is the error expected from Parse, or nil
+		err error
+		// repository is the string representation for the reference
+		repository string
+		// hostname is the hostname expected in the reference
+		hostname string
+		// tag is the tag for the reference
+		tag string
+		// digest is the digest for the reference (enforces digest reference)
+		digest string
+	}{
+		{
+			input:      "test_com",
+			repository: "test_com",
+		},
+		{
+			input:      "test.com:tag",
+			repository: "test.com",
+			tag:        "tag",
+		},
+		{
+			input:      "test.com:5000",
+			repository: "test.com",
+			tag:        "5000",
+		},
+		{
+			input:      "test.com/repo:tag",
+			hostname:   "test.com",
+			repository: "test.com/repo",
+			tag:        "tag",
+		},
+		{
+			input:      "test:5000/repo",
+			hostname:   "test:5000",
+			repository: "test:5000/repo",
+		},
+		{
+			input:      "test:5000/repo:tag",
+			hostname:   "test:5000",
+			repository: "test:5000/repo",
+			tag:        "tag",
+		},
+		{
+			input:      "test:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff",
+			hostname:   "test:5000",
+			repository: "test:5000/repo",
+			digest:
"sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + hostname: "test:5000", + repository: "test:5000/repo", + tag: "tag", + digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "test:5000/repo", + hostname: "test:5000", + repository: "test:5000/repo", + }, + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "repo@sha256:ffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestInvalidLength, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: digest.ErrDigestUnsupported, + }, + { + input: "Uppercase:tag", + err: ErrNameContainsUppercase, + }, + // FIXME "Uppercase" is incorrectly handled as a domain-name here, therefore passes. + // See https://github.com/docker/distribution/pull/1778, and https://github.com/docker/docker/pull/20175 + //{ + // input: "Uppercase/lowercase:tag", + // err: ErrNameContainsUppercase, + //}, + { + input: "test:5000/Uppercase/lowercase:tag", + err: ErrNameContainsUppercase, + }, + { + input: "lowercase:Uppercase", + repository: "lowercase", + tag: "Uppercase", + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: strings.Repeat("a/", 127) + "a:tag-puts-this-over-max", + hostname: "a", + repository: strings.Repeat("a/", 127) + "a", + tag: "tag-puts-this-over-max", + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + { + input: "sub-dom1.foo.com/bar/baz/quux", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + }, + { + input: "sub-dom1.foo.com/bar/baz/quux:some-long-tag", + hostname: "sub-dom1.foo.com", + repository: "sub-dom1.foo.com/bar/baz/quux", + tag: "some-long-tag", + }, + { + input: "b.gcr.io/test.example.com/my-app:test.example.com", + hostname: "b.gcr.io", + repository: "b.gcr.io/test.example.com/my-app", + tag: "test.example.com", + }, + { + input: "xn--n3h.com/myimage:xn--n3h.com", // ☃.com in punycode + hostname: "xn--n3h.com", + repository: "xn--n3h.com/myimage", + tag: "xn--n3h.com", + }, + { + input: "xn--7o8h.com/myimage:xn--7o8h.com@sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", // 🐳.com in punycode + hostname: "xn--7o8h.com", + repository: "xn--7o8h.com/myimage", + tag: "xn--7o8h.com", + digest: "sha512:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + }, + { + input: "foo_bar.com:8080", + repository: "foo_bar.com", + tag: "8080", + }, + { + input: "foo/foo_bar.com:8080", + hostname: "foo", + repository: "foo/foo_bar.com", + tag: "8080", + }, + } + for _, testcase := range referenceTestcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + repo, err := Parse(testcase.input) + if testcase.err != nil { + if err == nil { + failf("missing expected error: %v", testcase.err) + } else if testcase.err != err { + failf("mismatched error: got %v, expected %v", err, testcase.err) + } + continue + } else if err != nil { + failf("unexpected parse error: %v", err) + continue + } + if repo.String() != testcase.input { + failf("mismatched repo: got %q, expected %q", repo.String(), testcase.input) + } + + if named, ok := repo.(Named); ok { + if named.Name() != testcase.repository { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.repository) + } + hostname, _ := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + } else if testcase.repository != "" || testcase.hostname != "" { + failf("expected named type, got %T", repo) + } + + tagged, ok := repo.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", repo) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := repo.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", repo) + } + } else if ok { + failf("unexpected digested type") + } + + } +} + +// TestWithNameFailure tests cases where WithName should fail. Cases where it +// should succeed are covered by TestSplitHostname, below. +func TestWithNameFailure(t *testing.T) { + testcases := []struct { + input string + err error + }{ + { + input: "", + err: ErrNameEmpty, + }, + { + input: ":justtag", + err: ErrReferenceInvalidFormat, + }, + { + input: "@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: "validname@invaliddigest:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + err: ErrReferenceInvalidFormat, + }, + { + input: strings.Repeat("a/", 128) + "a:tag", + err: ErrNameTooLong, + }, + { + input: "aa/asdf$$^/aa", + err: ErrReferenceInvalidFormat, + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + _, err := WithName(testcase.input) + if err == nil { + failf("no error parsing name. expected: %s", testcase.err) + } + } +} + +func TestSplitHostname(t *testing.T) { + testcases := []struct { + input string + hostname string + name string + }{ + { + input: "test.com/foo", + hostname: "test.com", + name: "foo", + }, + { + input: "test_com/foo", + hostname: "", + name: "test_com/foo", + }, + { + input: "test:8080/foo", + hostname: "test:8080", + name: "foo", + }, + { + input: "test.com:8080/foo", + hostname: "test.com:8080", + name: "foo", + }, + { + input: "test-com:8080/foo", + hostname: "test-com:8080", + name: "foo", + }, + { + input: "xn--n3h.com:18080/foo", + hostname: "xn--n3h.com:18080", + name: "foo", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) 
+ t.Fail() + } + + named, err := WithName(testcase.input) + if err != nil { + failf("error parsing name: %s", err) + } + hostname, name := SplitHostname(named) + if hostname != testcase.hostname { + failf("unexpected hostname: got %q, expected %q", hostname, testcase.hostname) + } + if name != testcase.name { + failf("unexpected name: got %q, expected %q", name, testcase.name) + } + } +} + +type serializationType struct { + Description string + Field Field +} + +func TestSerialization(t *testing.T) { + testcases := []struct { + description string + input string + name string + tag string + digest string + err error + }{ + { + description: "empty value", + err: ErrNameEmpty, + }, + { + description: "just a name", + input: "example.com:8000/named", + name: "example.com:8000/named", + }, + { + description: "name with a tag", + input: "example.com:8000/named:tagged", + name: "example.com:8000/named", + tag: "tagged", + }, + { + description: "name with digest", + input: "other.com/named@sha256:1234567890098765432112345667890098765432112345667890098765432112", + name: "other.com/named", + digest: "sha256:1234567890098765432112345667890098765432112345667890098765432112", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.input)+": "+format, v...) + t.Fail() + } + + m := map[string]string{ + "Description": testcase.description, + "Field": testcase.input, + } + b, err := json.Marshal(m) + if err != nil { + failf("error marshalling: %v", err) + } + t := serializationType{} + + if err := json.Unmarshal(b, &t); err != nil { + if testcase.err == nil { + failf("error unmarshalling: %v", err) + } + if err != testcase.err { + failf("wrong error, expected %v, got %v", testcase.err, err) + } + + continue + } else if testcase.err != nil { + failf("expected error unmarshalling: %v", testcase.err) + } + + if t.Description != testcase.description { + failf("wrong description, expected %q, got %q", testcase.description, t.Description) + } + + ref := t.Field.Reference() + + if named, ok := ref.(Named); ok { + if named.Name() != testcase.name { + failf("unexpected repository: got %q, expected %q", named.Name(), testcase.name) + } + } else if testcase.name != "" { + failf("expected named type, got %T", ref) + } + + tagged, ok := ref.(Tagged) + if testcase.tag != "" { + if ok { + if tagged.Tag() != testcase.tag { + failf("unexpected tag: got %q, expected %q", tagged.Tag(), testcase.tag) + } + } else { + failf("expected tagged type, got %T", ref) + } + } else if ok { + failf("unexpected tagged type") + } + + digested, ok := ref.(Digested) + if testcase.digest != "" { + if ok { + if digested.Digest().String() != testcase.digest { + failf("unexpected digest: got %q, expected %q", digested.Digest().String(), testcase.digest) + } + } else { + failf("expected digested type, got %T", ref) + } + } else if ok { + failf("unexpected digested type") + } + + t = serializationType{ + Description: testcase.description, + Field: AsField(ref), + } + + b2, err := json.Marshal(t) + if err != nil { + failf("error marshing serialization type: %v", err) + } + + if string(b) != string(b2) { + failf("unexpected serialized value: expected %q, got %q", string(b), string(b2)) + } + + // Ensure t.Field is not implementing "Reference" directly, getting + // around the Reference type system + var fieldInterface interface{} = t.Field + if _, ok := fieldInterface.(Reference); ok { + failf("field should not implement Reference interface") + } + + } +} + +func 
TestWithTag(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + tag: "tag", + combined: "test.com/foo:tag", + }, + { + name: "foo", + tag: "tag2", + combined: "foo:tag2", + }, + { + name: "test.com:8000/foo", + tag: "tag4", + combined: "test.com:8000/foo:tag4", + }, + { + name: "test.com:8000/foo", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "TAG5", + combined: "test.com:8000/foo:TAG5@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) + t.Fail() + } + + named, err := WithName(testcase.name) + if err != nil { + failf("error parsing name: %s", err) + } + if testcase.digest != "" { + canonical, err := WithDigest(named, testcase.digest) + if err != nil { + failf("error adding digest") + } + named = canonical + } + + tagged, err := WithTag(named, testcase.tag) + if err != nil { + failf("WithTag failed: %s", err) + } + if tagged.String() != testcase.combined { + failf("unexpected: got %q, expected %q", tagged.String(), testcase.combined) + } + } +} + +func TestWithDigest(t *testing.T) { + testcases := []struct { + name string + digest digest.Digest + tag string + combined string + }{ + { + name: "test.com/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + combined: "test.com:8000/foo@sha256:1234567890098765432112345667890098765", + }, + { + name: "test.com:8000/foo", + digest: "sha256:1234567890098765432112345667890098765", + tag: "latest", + combined: "test.com:8000/foo:latest@sha256:1234567890098765432112345667890098765", + }, + } + for _, testcase := range testcases { + failf := func(format string, v ...interface{}) { + t.Logf(strconv.Quote(testcase.name)+": "+format, v...) 
+			t.Fail()
+		}
+
+		named, err := WithName(testcase.name)
+		if err != nil {
+			failf("error parsing name: %s", err)
+		}
+		if testcase.tag != "" {
+			tagged, err := WithTag(named, testcase.tag)
+			if err != nil {
+				failf("error adding tag")
+			}
+			named = tagged
+		}
+		digested, err := WithDigest(named, testcase.digest)
+		if err != nil {
+			failf("WithDigest failed: %s", err)
+		}
+		if digested.String() != testcase.combined {
+			failf("unexpected: got %q, expected %q", digested.String(), testcase.combined)
+		}
+	}
+}
+
+func TestMatchError(t *testing.T) {
+	named, err := Parse("foo")
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = Match("[-x]", named)
+	if err == nil {
+		t.Fatalf("expected an error, got nothing")
+	}
+}
+
+func TestMatch(t *testing.T) {
+	matchCases := []struct {
+		reference string
+		pattern   string
+		expected  bool
+	}{
+		{
+			reference: "foo",
+			pattern:   "foo/**/ba[rz]",
+			expected:  false,
+		},
+		{
+			reference: "foo/any/bat",
+			pattern:   "foo/**/ba[rz]",
+			expected:  false,
+		},
+		{
+			reference: "foo/a/bar",
+			pattern:   "foo/**/ba[rz]",
+			expected:  true,
+		},
+		{
+			reference: "foo/b/baz",
+			pattern:   "foo/**/ba[rz]",
+			expected:  true,
+		},
+		{
+			reference: "foo/c/baz:tag",
+			pattern:   "foo/**/ba[rz]",
+			expected:  true,
+		},
+		{
+			reference: "foo/c/baz:tag",
+			pattern:   "foo/*/baz:tag",
+			expected:  true,
+		},
+		{
+			reference: "foo/c/baz:tag",
+			pattern:   "foo/c/baz:tag",
+			expected:  true,
+		},
+		{
+			reference: "example.com/foo/c/baz:tag",
+			pattern:   "*/foo/c/baz",
+			expected:  true,
+		},
+		{
+			reference: "example.com/foo/c/baz:tag",
+			pattern:   "example.com/foo/c/baz",
+			expected:  true,
+		},
+	}
+	for _, c := range matchCases {
+		named, err := Parse(c.reference)
+		if err != nil {
+			t.Fatal(err)
+		}
+		actual, err := Match(c.pattern, named)
+		if err != nil {
+			t.Fatal(err)
+		}
+		if actual != c.expected {
+			t.Fatalf("expected %s match %s to be %v, was %v", c.reference, c.pattern, c.expected, actual)
+		}
+	}
+}
diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go
new file mode 100644
index 0000000000..9a7d366bc8
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/regexp.go
@@ -0,0 +1,124 @@
+package reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alphanumeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and
+	// multiple dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// hostnameComponentRegexp restricts the registry hostname component of a
+	// repository name to start with a component as defined by hostnameRegexp
+	// and followed by an optional port.
+	hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// hostnameRegexp defines the structure of potential hostname components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	hostnameRegexp = expression(
+		hostnameComponentRegexp,
+		optional(repeated(literal(`.`), hostnameComponentRegexp)),
+		optional(literal(`:`), match(`[0-9]+`)))
+
+	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+	TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+	// anchoredTagRegexp matches valid tag names, anchored at the start and
+	// end of the matched string.
+	anchoredTagRegexp = anchored(TagRegexp)
+
+	// DigestRegexp matches valid digests.
+	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+	// anchoredDigestRegexp matches valid digests, anchored at the start and
+	// end of the matched string.
+	anchoredDigestRegexp = anchored(DigestRegexp)
+
+	// NameRegexp is the format for the name component of references. The
+	// regexp has capturing groups for the hostname and name part omitting
+	// the separating forward slash from either.
+	NameRegexp = expression(
+		optional(hostnameRegexp, literal(`/`)),
+		nameComponentRegexp,
+		optional(repeated(literal(`/`), nameComponentRegexp)))
+
+	// anchoredNameRegexp is used to parse a name value, capturing the
+	// hostname and trailing components.
+	anchoredNameRegexp = anchored(
+		optional(capture(hostnameRegexp), literal(`/`)),
+		capture(nameComponentRegexp,
+			optional(repeated(literal(`/`), nameComponentRegexp))))
+
+	// ReferenceRegexp is the full supported format of a reference. The
+	// regexp is anchored and has capturing groups for name, tag, and
+	// digest components.
+	ReferenceRegexp = anchored(capture(NameRegexp),
+		optional(literal(":"), capture(TagRegexp)),
+		optional(literal("@"), capture(DigestRegexp)))
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+	re := match(regexp.QuoteMeta(s))
+
+	if _, complete := re.LiteralPrefix(); !complete {
+		panic("must be a literal")
+	}
+
+	return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+	var s string
+	for _, re := range res {
+		s += re.String()
+	}
+
+	return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`^` + expression(res...).String() + `$`)
+}
diff --git a/vendor/github.com/docker/distribution/reference/regexp_test.go b/vendor/github.com/docker/distribution/reference/regexp_test.go
new file mode 100644
index 0000000000..2ec39377af
--- /dev/null
+++ b/vendor/github.com/docker/distribution/reference/regexp_test.go
@@ -0,0 +1,489 @@
+package reference
+
+import (
+	"regexp"
+	"strings"
+	"testing"
+)
+
+type regexpMatch struct {
+	input string
+	match bool
+	subs  []string
+}
+
+func checkRegexp(t *testing.T, r *regexp.Regexp, m regexpMatch) {
+	matches := r.FindStringSubmatch(m.input)
+	if m.match && matches != nil {
+		if len(matches) != (r.NumSubexp()+1) || matches[0] != m.input {
+			t.Fatalf("Bad match result %#v for %q", matches, m.input)
+		}
+		if len(matches) < (len(m.subs) + 1) {
+			t.Errorf("Expected %d sub matches, only have %d for %q", len(m.subs), len(matches)-1, m.input)
+		}
+		for i := range m.subs {
+			if m.subs[i] != matches[i+1] {
+				t.Errorf("Unexpected submatch %d: %q, expected %q for %q", i+1, matches[i+1], m.subs[i], m.input)
+			}
+		}
+	} else if m.match {
+		t.Errorf("Expected match for %q", m.input)
+	} else if matches != nil {
+		t.Errorf("Unexpected match for %q", m.input)
+	}
+}
+
+func TestHostRegexp(t *testing.T) {
+	hostcases := []regexpMatch{
+		{
+			input: "test.com",
+			match: true,
+		},
+		{
+			input: "test.com:10304",
+			match: true,
+		},
+		{
+			input: "test.com:http",
+			match: false,
+		},
+		{
+			input: "localhost",
+			match: true,
+		},
+		{
+			input: "localhost:8080",
+			match: true,
+		},
+		{
+			input: "a",
+			match: true,
+		},
+		{
+			input: "a.b",
+			match: true,
+		},
+		{
+			input: "ab.cd.com",
+			match: true,
+		},
+		{
+			input: "a-b.com",
+			match: true,
+		},
+		{
+			input: "-ab.com",
+			match: false,
+		},
+		{
+			input: "ab-.com",
+			match: false,
+		},
+		{
+			input: "ab.c-om",
+			match: true,
+		},
+		{
+			input: "ab.-com",
+			match: false,
+		},
+		{
+			input: "ab.com-",
+			match: false,
+		},
+		{
+			input: "0101.com",
+			match: true, // TODO(dmcgowan): validate whether this should be allowed
+		},
+		{
+			input: "001a.com",
+			match: true,
+		},
+		{
+			input: "b.gbc.io:443",
+			match: true,
+		},
+		{
+			input: "b.gbc.io",
+			match: true,
+		},
+		{
+			input: "xn--n3h.com", // ☃.com in punycode
+			match: true,
+		},
+		{
+			input: "Asdf.com", // uppercase character
+			match: true,
+		},
+	}
+	r := regexp.MustCompile(`^` + hostnameRegexp.String() + `$`)
+	for i := range hostcases {
+		checkRegexp(t, r, hostcases[i])
+	}
+}
+
+func TestFullNameRegexp(t *testing.T) {
+	if anchoredNameRegexp.NumSubexp() != 2 {
+		t.Fatalf("anchored name regexp should have two submatches: %v, %v != 2",
+			anchoredNameRegexp, anchoredNameRegexp.NumSubexp())
+	}
+
+	testcases := []regexpMatch{
+		{
+			input: "",
+			match: false,
+		},
+		{
+			input: "short",
+			match: true,
+			subs:  []string{"", "short"},
+		},
+		{
+			input: "simple/name",
+			match: true,
+			subs:  []string{"simple", "name"},
+		},
+		{
+			input: "library/ubuntu",
+			match: true,
+			subs:  []string{"library", "ubuntu"},
+		},
+		{
+			input: "docker/stevvooe/app",
+			match: true,
+			subs:  []string{"docker", "stevvooe/app"},
+		},
+		{
+			input: "aa/aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb",
+			match: true,
+			subs:  []string{"aa", "aa/aa/aa/aa/aa/aa/aa/aa/bb/bb/bb/bb/bb/bb"},
+		},
+		{
+			input: "aa/aa/bb/bb/bb",
+			match: true,
+			subs:  []string{"aa", "aa/bb/bb/bb"},
+		},
+		{
+			input: "a/a/a/a",
+			match: true,
+			subs:  []string{"a", "a/a/a"},
+		},
+		{
+			input: "a/a/a/a/",
+			match: false,
+		},
+		{
+			input: "a//a/a",
+			match: false,
+		},
+		{
+			input: "a",
+			match: true,
+			subs:  []string{"", "a"},
+		},
+		{
+			input: "a/aa",
+			match: true,
+			subs:  []string{"a", "aa"},
+		},
+		{
+			input: "a/aa/a",
+			match: true,
+			subs:  []string{"a", "aa/a"},
+		},
+		{
+			input: "foo.com",
+			match: true,
+			subs:  []string{"", "foo.com"},
+		},
+		{
+			input: "foo.com/",
+			match: false,
+		},
+		{
+			input: "foo.com:8080/bar",
+			match: true,
+			subs:  []string{"foo.com:8080", "bar"},
+		},
+		{
+			input: "foo.com:http/bar",
+			match: false,
+		},
+		{
+			input: "foo.com/bar",
+			match: true,
+			subs:  []string{"foo.com", "bar"},
+		},
+		{
+			input: "foo.com/bar/baz",
+			match: true,
+			subs:  []string{"foo.com", "bar/baz"},
+		},
+		{
+			input: "localhost:8080/bar",
+			match: true,
+			subs:  []string{"localhost:8080", "bar"},
+		},
+		{
+			input: "sub-dom1.foo.com/bar/baz/quux",
+			match: true,
+			subs:  []string{"sub-dom1.foo.com", "bar/baz/quux"},
+		},
+		{
+			input: "blog.foo.com/bar/baz",
+			match: true,
+			subs:  []string{"blog.foo.com", "bar/baz"},
+		},
+		{
+			input: "a^a",
+			match: false,
+		},
+		{
+			input: "aa/asdf$$^/aa",
+			match: false,
+		},
+		{
+			input: "asdf$$^/aa",
+			match: false,
+		},
+		{
+			input: "aa-a/a",
+			match: true,
+			subs:  []string{"aa-a", "a"},
+		},
+		{
+			input: strings.Repeat("a/", 128) + "a",
+			match: true,
+			subs:  []string{"a", strings.Repeat("a/", 127) + "a"},
+		},
+		{
+			input: "a-/a/a/a",
+			match: false,
+		},
+		{
+			input: "foo.com/a-/a/a",
+			match: false,
+		},
+		{
+			input: "-foo/bar",
+			match: false,
+		},
+		{
+			input: "foo/bar-",
+			match: false,
+		},
+		{
+			input: "foo-/bar",
+			match: false,
+		},
+		{
+			input: "foo/-bar",
+			match: false,
+		},
+		{
+			input: "_foo/bar",
+			match: false,
+		},
+		{
+			input: "foo_bar",
+			match: true,
+			subs:  []string{"", "foo_bar"},
+		},
+		{
+			input: "foo_bar.com",
+			match: true,
+			subs:  []string{"", "foo_bar.com"},
+		},
+		{
+			input: "foo_bar.com:8080",
+			match: false,
+		},
+		{
+			input: "foo_bar.com:8080/app",
+			match: false,
+		},
+		{
+			input: "foo.com/foo_bar",
+			match: true,
+			subs:  []string{"foo.com", "foo_bar"},
+		},
+		{
+			input: "____/____",
+			match: false,
+		},
+		{
+			input: "_docker/_docker",
+			match: false,
+		},
+		{
+			input: "docker_/docker_",
+			match: false,
+		},
+		{
+			input: "b.gcr.io/test.example.com/my-app",
+			match: true,
+			subs:  []string{"b.gcr.io", "test.example.com/my-app"},
+		},
+		{
+			input: "xn--n3h.com/myimage", // ☃.com in punycode
+			match: true,
+			subs:  []string{"xn--n3h.com", "myimage"},
+		},
+		{
+			input: "xn--7o8h.com/myimage", // 🐳.com in punycode
+			match: true,
+			subs:  []string{"xn--7o8h.com", "myimage"},
+		},
+		{
+			input: "example.com/xn--7o8h.com/myimage", // 🐳.com in punycode
+			match: true,
+			subs:  []string{"example.com", "xn--7o8h.com/myimage"},
+		},
+		{
+			input: "example.com/some_separator__underscore/myimage",
+			match: true,
+			subs:  []string{"example.com", "some_separator__underscore/myimage"},
+		},
+		{
+			input: "example.com/__underscore/myimage",
+			match: false,
+		},
+		{
+			input: "example.com/..dots/myimage",
+			match: false,
+		},
+		{
+			input: "example.com/.dots/myimage",
+			match: false,
+		},
+		{
+			input: "example.com/nodouble..dots/myimage",
+			match: false,
+		},
+		{
+			input: "example.com/nodouble..dots/myimage",
+			match: false,
+		},
+		{
+			input: "docker./docker",
+			match: false,
+		},
+		{
+			input: ".docker/docker",
+			match: false,
+		},
+		{
+			input: "docker-/docker",
+			match: false,
+		},
+		{
+			input: "-docker/docker",
+			match: false,
+		},
+		{
+			input: "do..cker/docker",
+			match: false,
+		},
+		{
+			input: "do__cker:8080/docker",
+			match: false,
+		},
+		{
+			input: "do__cker/docker",
+			match: true,
+			subs:  []string{"", "do__cker/docker"},
+		},
+		{
+			input: "b.gcr.io/test.example.com/my-app",
+			match: true,
+			subs:  []string{"b.gcr.io", "test.example.com/my-app"},
+		},
+		{
+			input: "registry.io/foo/project--id.module--name.ver---sion--name",
+			match: true,
+			subs:  []string{"registry.io", "foo/project--id.module--name.ver---sion--name"},
+		},
+		{
+			input: "Asdf.com/foo/bar", // uppercase character in hostname
+			match: true,
+		},
+		{
+			input: "Foo/FarB", // uppercase characters in remote name
+			match: false,
+		},
+	}
+	for i := range testcases {
+		checkRegexp(t, anchoredNameRegexp, testcases[i])
+	}
+}
+
+func TestReferenceRegexp(t *testing.T) {
+	if ReferenceRegexp.NumSubexp() != 3 {
+		t.Fatalf("reference regexp should have three submatches: %v, %v != 3",
+			ReferenceRegexp, ReferenceRegexp.NumSubexp())
+	}
+
+	testcases := []regexpMatch{
+		{
+			input: "registry.com:8080/myapp:tag",
+			match: true,
+			subs:  []string{"registry.com:8080/myapp", "tag", ""},
+		},
+		{
+			input: "registry.com:8080/myapp@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"registry.com:8080/myapp", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "registry.com:8080/myapp:tag2@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"registry.com:8080/myapp", "tag2", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "registry.com:8080/myapp@sha256:badbadbadbad",
+			match: false,
+		},
+		{
+			input: "registry.com:8080/myapp:invalid~tag",
+			match: false,
+		},
+		{
+			input: "bad_hostname.com:8080/myapp:tag",
+			match: false,
+		},
+		{
+			// localhost treated as name, missing tag with 8080 as tag
+			input: "localhost:8080@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"localhost", "8080", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "localhost:8080/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"localhost:8080/name", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "localhost:http/name@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: false,
+		},
+		{
+			// localhost will be treated as an image name without a host
+			input: "localhost@sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912",
+			match: true,
+			subs:  []string{"localhost", "", "sha256:be178c0543eb17f5f3043021c9e5fcf30285e557a4fc309cce97ff9ca6182912"},
+		},
+		{
+			input: "registry.com:8080/myapp@bad",
+			match: false,
+		},
+		{
+			input: "registry.com:8080/myapp@2bad",
+			match: false, // TODO(dmcgowan): Support this as valid
+		},
+	}
+
+	for i := range testcases {
+		checkRegexp(t, ReferenceRegexp, testcases[i])
+	}
+
+}
diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go
new file mode 100644
index 0000000000..1ede31ebb6
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry.go
@@ -0,0 +1,97 @@
+package distribution
+
+import (
+	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/reference"
+)
+
+// Scope defines the set of items that match a namespace.
+type Scope interface {
+	// Contains returns true if the name belongs to the namespace.
+	Contains(name string) bool
+}
+
+type fullScope struct{}
+
+func (f fullScope) Contains(string) bool {
+	return true
+}
+
+// GlobalScope represents the full namespace scope which contains
+// all other scopes.
+var GlobalScope = Scope(fullScope{})
+
+// Namespace represents a collection of repositories, addressable by name.
+// Generally, a namespace is backed by a set of one or more services,
+// providing facilities such as registry access, trust, and indexing.
+type Namespace interface {
+	// Scope describes the names that can be used with this Namespace. The
+	// global namespace will have a scope that matches all names. The scope
+	// effectively provides an identity for the namespace.
+	Scope() Scope
+
+	// Repository should return a reference to the named repository. The
+	// registry may or may not have the repository but should always return a
+	// reference.
+	Repository(ctx context.Context, name reference.Named) (Repository, error)
+
+	// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
+	// up to the size of 'repos' and returns the value 'n' for the number of entries
+	// which were filled. 'last' contains an offset in the catalog, and 'err' will be
+	// set to io.EOF if there are no more entries to obtain.
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+
+	// Blobs returns a blob enumerator to access all blobs.
+	Blobs() BlobEnumerator
+
+	// BlobStatter returns a BlobStatter used to stat blobs.
+	BlobStatter() BlobStatter
+}
+
+// RepositoryEnumerator describes an operation to enumerate repositories.
+type RepositoryEnumerator interface {
+	Enumerate(ctx context.Context, ingester func(string) error) error
+}
+
+// ManifestServiceOption is a function argument for ManifestService methods.
+type ManifestServiceOption interface {
+	Apply(ManifestService) error
+}
+
+// WithTag allows a tag to be passed into Put.
+func WithTag(tag string) ManifestServiceOption {
+	return WithTagOption{tag}
+}
+
+// WithTagOption holds a tag.
+type WithTagOption struct{ Tag string }
+
+// Apply conforms to the ManifestServiceOption interface.
+func (o WithTagOption) Apply(m ManifestService) error {
+	// no implementation
+	return nil
+}
+
+// Repository is a named collection of manifests and layers.
+type Repository interface {
+	// Named returns the name of the repository.
+	Named() reference.Named
+
+	// Manifests returns a reference to this repository's manifest service,
+	// with the supplied options applied.
+	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)
+
+	// Blobs returns a reference to this repository's blob service.
+	Blobs(ctx context.Context) BlobStore
+
+	// TODO(stevvooe): The above BlobStore return can probably be relaxed to
+	// be a BlobService for use with clients. This will allow such
+	// implementations to avoid implementing ServeBlob.
+
+	// Tags returns a reference to this repository's tag service.
+	Tags(ctx context.Context) TagService
+}
+
+// TODO(stevvooe): Must add close methods to all these. May want to change the
+// way instances are created to better reflect internal dependency
+// relationships.
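
For orientation while reviewing this vendor bump: the reference package above is what metrictank's dependencies use to parse and validate image-style references. Below is a minimal sketch (not part of the vendored tree) of how a consumer exercises it; it assumes only the identifiers visible in the files above (WithName, SplitHostname, WithTag, Parse, Match and the vendored import path), and the hostname/tag values are made up for illustration.

    // Sketch only: exercises the vendored reference API shown above.
    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/reference"
    )

    func main() {
    	// WithName validates a repository name against NameRegexp.
    	named, err := reference.WithName("example.com:8000/myapp")
    	if err != nil {
    		panic(err)
    	}

    	// SplitHostname returns the hostname and remote-name halves that
    	// anchoredNameRegexp captures.
    	hostname, remoteName := reference.SplitHostname(named)
    	fmt.Println(hostname, remoteName) // example.com:8000 myapp

    	// WithTag combines a name and a tag; String() renders "name:tag".
    	tagged, err := reference.WithTag(named, "v1.5")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(tagged.String()) // example.com:8000/myapp:v1.5

    	// Match applies a path.Match-style pattern; per TestMatch above, a
    	// pattern without a tag still matches a tagged reference.
    	ok, err := reference.Match("example.com:8000/*", tagged)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(ok) // true
    }

The tags.go file that follows defines the TagService interface that sits behind the Repository.Tags accessor above.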
diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 0000000000..5030565963 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "github.com/docker/distribution/context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. + Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/docker/.dockerignore b/vendor/github.com/docker/docker/.dockerignore new file mode 100644 index 0000000000..082cac9224 --- /dev/null +++ b/vendor/github.com/docker/docker/.dockerignore @@ -0,0 +1,4 @@ +bundles +.gopath +vendor/pkg +.go-pkg-cache diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore new file mode 100644 index 0000000000..be8b03d17b --- /dev/null +++ b/vendor/github.com/docker/docker/.gitignore @@ -0,0 +1,33 @@ +# Docker project generated files to ignore +# if you want to ignore files created by your editor/tools, +# please consider a global .gitignore https://help.github.com/articles/ignoring-files +*.exe +*.exe~ +*.orig +*.test +.*.swp +.DS_Store +# a .bashrc may be added to customize the build environment +.bashrc +.editorconfig +.gopath/ +.go-pkg-cache/ +autogen/ +bundles/ +cmd/dockerd/dockerd +cmd/docker/docker +dockerversion/version_autogen.go +dockerversion/version_autogen_unix.go +docs/AWS_S3_BUCKET +docs/GITCOMMIT +docs/GIT_BRANCH +docs/VERSION +docs/_build +docs/_static +docs/_templates +docs/changed-files +# generated by man/md2man-all.sh +man/man1 +man/man5 +man/man8 +vendor/pkg/ diff --git a/vendor/github.com/docker/docker/.mailmap b/vendor/github.com/docker/docker/.mailmap new file mode 100644 index 0000000000..fe99e2086f --- /dev/null +++ b/vendor/github.com/docker/docker/.mailmap @@ -0,0 +1,275 @@ +# Generate AUTHORS: hack/generate-authors.sh + +# Tip for finding duplicates (besides scanning the output of AUTHORS for name +# duplicates that aren't also email duplicates): scan the output of: +# git log --format='%aE - %aN' | sort -uf +# +# For explanation on this file format: man git-shortlog + +Patrick Stapleton +Shishir Mahajan +Erwin van der Koogh +Ahmed Kamal +Tejesh Mehta +Cristian Staretu +Cristian Staretu +Cristian Staretu +Marcus Linke +Aleksandrs Fadins +Christopher Latham +Hu Keping +Wayne Chang +Chen Chao +Daehyeok Mun + + + + + + +Guillaume J. 
Charmes + + + + + +Thatcher Peskens +Thatcher Peskens +Thatcher Peskens dhrp +Jérôme Petazzoni jpetazzo +Jérôme Petazzoni +Joffrey F +Joffrey F +Joffrey F +Tim Terhorst +Andy Smith + + + + + + + + + +Walter Stanish + +Roberto Hashioka +Konstantin Pelykh +David Sissitka +Nolan Darilek + +Benoit Chesneau +Jordan Arentsen +Daniel Garcia +Miguel Angel Fernández +Bhiraj Butala +Faiz Khan +Victor Lyuboslavsky +Jean-Baptiste Barth +Matthew Mueller + +Shih-Yuan Lee +Daniel Mizyrycki root +Jean-Baptiste Dalido + + + + + + + + + + + + + + +Sven Dowideit +Sven Dowideit +Sven Dowideit +Sven Dowideit <¨SvenDowideit@home.org.au¨> +Sven Dowideit +Sven Dowideit +Sven Dowideit + +Alexander Morozov +Alexander Morozov + +O.S. Tezer + +Roberto G. Hashioka + + + + + +Sridhar Ratnakumar +Sridhar Ratnakumar +Liang-Chi Hsieh +Aleksa Sarai +Aleksa Sarai +Aleksa Sarai +Will Weaver +Timothy Hobbs +Nathan LeClaire +Nathan LeClaire + + + + +Matthew Heon + + + + +Francisco Carriedo + + + + +Brian Goff + + + +Hollie Teal + + + +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle +Jessica Frazelle + + + + +Thomas LEVEIL Thomas LÉVEIL + + +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Antonio Murdaca +Darren Shepherd +Deshi Xiao +Deshi Xiao +Doug Davis +Jacob Atzen +Jeff Nickoloff +John Howard (VM) +John Howard (VM) +John Howard (VM) +John Howard (VM) +Madhu Venugopal +Mary Anthony +Mary Anthony moxiegirl +Mary Anthony +mattyw +resouer +AJ Bowen soulshake +AJ Bowen soulshake +Tibor Vass +Tibor Vass +Vincent Bernat +Yestin Sun +bin liu +John Howard (VM) jhowardmsft +Ankush Agarwal +Tangi COLIN tangicolin +Allen Sun +Adrien Gallouët + +Anuj Bahuguna +Anusha Ragunathan +Avi Miller +Brent Salisbury +Chander G +Chun Chen +Ying Li +Daehyeok Mun + +Daniel, Dao Quang Minh +Daniel Nephin +Dave Tucker +Doug Tangren +Frederick F. Kautz IV +Ben Golub +Harold Cooper +hsinko <21551195@zju.edu.cn> +Josh Hawn +Justin Cormack + + +Kamil Domański +Lei Jitang + +Linus Heckemann + +Lynda O'Leary + +Marianna Tessel +Michael Huettermann +Moysés Borges + +Nigel Poulton +Qiang Huang + +Boaz Shuster +Shuwei Hao + +Soshi Katsuta + +Stefan Berger + +Stephen Day + +Toli Kuznets +Tristan Carel + +Vincent Demeester + +Vishnu Kannan +xlgao-zju xlgao +yuchangchun y00277921 + + + + +Hao Shu Wei + + + + + + + +Shengbo Song mYmNeo +Shengbo Song + +Sylvain Bellemare + + + +Arnaud Porterie + +David M. Karr + + + +Kenfe-Mickaël Laventure + + + + + +Runshen Zhu +Tom Barlow +Xianlu Bird +Dan Feldman +Harry Zhang diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 0000000000..246e2a33f5 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,1652 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. 
+ +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Avilla +Adam Kunk +Adam Miller +Adam Mills +Adam Singer +Adam Walz +Aditi Rajagopal +Aditya +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Richards +amangoel +Amen Belayneh +Amit Bakshi +Amit Krishnan +Amit Shukla +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Lefebvre +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bilal Amarni +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. 
Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Davanum Srinivas +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Smirnov +Dmitry V. 
Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Barch +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Everett Toews +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +fonglh +fortinux +Francesc Campoy +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +frosforever +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. 
Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Ji.Zhilong +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joao Fernandes +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +johnharris85 +Jon Wedaman +Jonas Pfenniger +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Menard +Kevin P. 
Kucharczyk +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill Kolyshkin +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin L +Konstantin Pelykh +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +Kyle Linden +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +lixiaobing10051267 +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +Mansi Nahar +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Mickaël FORTUNATO +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +nick +Nick DeCoursin +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicola Kabar +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +orkaa +Oskar Niburski +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. 
Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +pestophagous +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Terhaar +Robert Wallis +Roberto G. Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +Ron Williams +root +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Runshen Zhu +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +sakeven +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +Satoshi Tagomori +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. 
Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Serhat Gülçiçek +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +skaasten +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Algaze +Victor Coisne +Victor Costan +Victor I. 
Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenkai Yin +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +Wonjun Kim +xamyzhao +Xianlu Bird +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yanqiang Miao +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +yuexiao-wang +YuPengZTE +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +zhouhao +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md new file mode 100644 index 0000000000..36bb880639 --- /dev/null +++ b/vendor/github.com/docker/docker/CHANGELOG.md @@ -0,0 +1,3337 @@ +# Changelog + +Items starting with `DEPRECATE` are important deprecation notices. For more +information on the list of deprecated flags and APIs please have a look at +https://docs.docker.com/engine/deprecated/ where target removal dates can also +be found. + +## 1.13.1 (2017-02-08) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. 
+ +### Contrib + +* Do not require a custom build of tini [#28454](https://github.com/docker/docker/pull/28454) +* Upgrade to Go 1.7.5 [#30489](https://github.com/docker/docker/pull/30489) + +### Remote API (v1.26) & Client + ++ Support secrets in docker stack deploy with compose file [#30144](https://github.com/docker/docker/pull/30144) + +### Runtime + +* Fix size issue in `docker system df` [#30378](https://github.com/docker/docker/pull/30378) +* Fix error on `docker inspect` when Swarm certificates were expired. [#29246](https://github.com/docker/docker/pull/29246) +* Fix deadlock on v1 plugin with activate error [#30408](https://github.com/docker/docker/pull/30408) +* Fix SELinux regression [#30649](https://github.com/docker/docker/pull/30649) + +### Plugins + +* Support global scoped network plugins (v2) in swarm mode [#30332](https://github.com/docker/docker/pull/30332) ++ Add `docker plugin upgrade` [#29414](https://github.com/docker/docker/pull/29414) + +### Windows + +* Fix small regression with old plugins in Windows [#30150](https://github.com/docker/docker/pull/30150) +* Fix warning on Windows [#30730](https://github.com/docker/docker/pull/30730) + +## 1.13.0 (2017-01-18) + +**IMPORTANT**: On Linux distributions where `devicemapper` was the default storage driver, +the `overlay2`, or `overlay` is now used by default (if the kernel supports it). +To use devicemapper, you can manually configure the storage driver to use through +the `--storage-driver` daemon option, or by setting "storage-driver" in the `daemon.json` +configuration file. + +**IMPORTANT**: In Docker 1.13, the managed plugin api changed, as compared to the experimental +version introduced in Docker 1.12. You must **uninstall** plugins which you installed with Docker 1.12 +_before_ upgrading to Docker 1.13. You can uninstall plugins using the `docker plugin rm` command. + +If you have already upgraded to Docker 1.13 without uninstalling +previously-installed plugins, you may see this message when the Docker daemon +starts: + + Error starting daemon: json: cannot unmarshal string into Go value of type types.PluginEnv + +To manually remove all plugins and resolve this problem, take the following steps: + +1. Remove plugins.json from: `/var/lib/docker/plugins/`. +2. Restart Docker. Verify that the Docker daemon starts with no errors. +3. Reinstall your plugins. + +### Builder + ++ Add capability to specify images used as a cache source on build. These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839) ++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641) +* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725) +- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978) ++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837) ++ add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702) +- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027) +- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209) +* Unused build-args are now allowed. 
A warning is presented instead of an error and failed build [#27412](https://github.com/docker/docker/pull/27412) +- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805) ++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415) ++ Handle env case-insensitive on Windows [#28725](https://github.com/docker/docker/pull/28725) + +### Contrib + ++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438) ++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104) ++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046) +- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116) ++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735) +* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005) ++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993) ++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222) ++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625) + +### Distribution + +* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074) + - Support for compilation on windows [docker/notary#970](https://github.com/docker/notary/pull/970) + - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972) + - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981) + - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. 
+
+### Builder
+
++ Add capability to specify images used as a cache source on build. These images do not need to have a local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839)
++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641)
+* Fix Dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725)
+- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978)
++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837)
++ Add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702)
+- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027)
+- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209)
+* Unused build-args are now allowed. A warning is presented instead of an error and a failed build [#27412](https://github.com/docker/docker/pull/27412)
+- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805)
++ Allow `USER` in builder on Windows [#28415](https://github.com/docker/docker/pull/28415)
++ Handle env case-insensitively on Windows [#28725](https://github.com/docker/docker/pull/28725)
+
+### Contrib
+
++ Add support for building docker debs for Ubuntu 16.04 Xenial on PPC64LE [#23438](https://github.com/docker/docker/pull/23438)
++ Add support for building docker debs for Ubuntu 16.04 Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104)
++ Add support for building docker debs for Ubuntu 16.10 Yakkety Yak on PPC64LE [#28046](https://github.com/docker/docker/pull/28046)
+- Add RPM builder for VMware Photon OS [#24116](https://github.com/docker/docker/pull/24116)
++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735)
+* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005)
++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993)
++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222)
++ Add `make deb` support for aarch64 [#27625](https://github.com/docker/docker/pull/27625)
+
+### Distribution
+
+* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074)
+  - Support for compilation on Windows [docker/notary#970](https://github.com/docker/notary/pull/970)
+  - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972)
+  - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981)
+  - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail, for example if there was an invalid root rotation. [docker/notary#982](https://github.com/docker/notary/pull/982)
+  - Improve root validation and YubiKey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891)
+  - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802)
+  - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786)
+  - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906)
+- Avoid unnecessary blob uploads when different users push the same layers to an authenticated registry [#26564](https://github.com/docker/docker/pull/26564)
+* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354)
+
+### Logging
+
+* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911)
+- Improve performance and memory use when logging long log lines [#22982](https://github.com/docker/docker/pull/22982)
++ Enable syslog driver for Windows [#25736](https://github.com/docker/docker/pull/25736)
++ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471)
++ Update the AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707)
++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088)
+* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189)
+- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725)
+- Fix an issue where `docker logs --tail` returned fewer lines than expected [#28203](https://github.com/docker/docker/pull/28203)
+- Splunk Logging Driver: performance and reliability improvements [#26207](https://github.com/docker/docker/pull/26207)
+- Splunk Logging Driver: configurable formats and an option to skip connection verification [#25786](https://github.com/docker/docker/pull/25786)
+
+### Networking
+
++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962)
++ Add support for host port PublishMode in services using the `--publish` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) and [#28943](https://github.com/docker/docker/pull/28943)
++ Add support for Windows Server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182)
+* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257)
++ Add support for specifying static IP addresses for predefined network on Windows [#22208](https://github.com/docker/docker/pull/22208)
+- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860)
+- Fix `docker network inspect` showing gateway with mask [#25564](https://github.com/docker/docker/pull/25564)
+- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659)
++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130)
+- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078)
+- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019)
+
+### Plugins
+
+- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226)
+- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096)
+* Add support for dynamically reloading authorization plugins [#22770](https://github.com/docker/docker/pull/22770)
++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556)
++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990)
++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164)
+* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383)
+* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287)
+* Split `docker plugin install` into two API calls, `/privileges` and `/pull` [#28963](https://github.com/docker/docker/pull/28963)
+
+### Remote API (v1.25) & Client
+
++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998)
++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049)
++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808)
+* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830)
++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886)
++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411)
++ Add `--env-file` flag to `docker service create` [#24844](https://github.com/docker/docker/pull/24844)
++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987)
++ Make `docker node ps` default to `self` on a swarm node [#25214](https://github.com/docker/docker/pull/25214)
++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317)
++ Add `--no-trunc` to service/node/stack ps output [#25337](https://github.com/docker/docker/pull/25337)
++ Add Logs to `ContainerAttachOptions` so Go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718)
++ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745)
+* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074)
++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255)
++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840)
+- Do not allow more than one mode to be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643)
++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373)
++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475)
+* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614)
++ Add `--cpus` flag to control CPU resources for `docker run` and `docker create`, and add `NanoCPUs` to `HostConfig` [#27958](https://github.com/docker/docker/pull/27958)
+- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718)
+* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025)
+- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029)
++ Add `--format` to `docker events` [#26268](https://github.com/docker/docker/pull/26268)
+* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299)
+* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303)
++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186)
++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128)
++ Add external binaries version to `docker info` [#27955](https://github.com/docker/docker/pull/27955)
++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042)
++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872)
+
+### Runtime
+
++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223)
++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036)
++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566)
++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882)
++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini), a zombie-reaping init process, as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037)
++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941)
++ Add support for live reloading the insecure registry configuration [#22337](https://github.com/docker/docker/pull/22337)
++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391)
+* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848)
++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430)
+* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778)
+* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771)
+- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850)
+- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672)
+- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497)
+- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737)
+- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771)
++ (experimental) Add metrics (Prometheus) output for basic `container`, `image`, and `daemon` operations [#25820](https://github.com/docker/docker/pull/25820)
+- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905)
++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891)
++ Record the PID of the exec'd process [#27470](https://github.com/docker/docker/pull/27470)
++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599)
++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525)
+- Fix an issue where containers could not be stopped or killed, by setting XFS max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212)
+- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047)
+* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932)
++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276)
+- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405)
+- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138)
++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461)
+* Honor a container’s `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464)
+
+### Swarm Mode
+
++ Add secret management [#27794](https://github.com/docker/docker/pull/27794)
++ Add support for templating service options (hostname, mounts, and environment variables) [#28025](https://github.com/docker/docker/pull/28025)
+* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906)
+* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088)
+* Make `docker node ps` default to the current node [#25214](https://github.com/docker/docker/pull/25214)
++ Add `--dns`, `--dns-opt`, and `--dns-search` to `docker service create` [#27567](https://github.com/docker/docker/pull/27567)
++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596)
++ Add `--health-*` and `--no-healthcheck` flags to `docker service create` and `docker service update` [#27369](https://github.com/docker/docker/pull/27369)
++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654)
+* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710)
+- Remove `--name` flag from `docker service update`. This flag is only functional on `docker service create`, so it was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988)
+- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646)
+* Add support for health-aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279)
++ Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857)
++ Add `--host` to `docker service create`, and `--host-add`, `--host-rm` to `docker service update` [#28031](https://github.com/docker/docker/pull/28031)
++ Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076)
+* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910)
+* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967)
++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421)
+- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457)
++ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089)
++ Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173)
+* Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196)
++ Add options to customize Raft snapshots (`--max-snapshots`, `--snapshot-interval`) [#27997](https://github.com/docker/docker/pull/27997)
+- Don't re-pull an image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265)
++ Swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838)
++ Allow hostname to be updated on a service [#28771](https://github.com/docker/docker/pull/28771)
++ Support v2 plugins [#29433](https://github.com/docker/docker/pull/29433)
++ Add content trust for services [#29469](https://github.com/docker/docker/pull/29469)
+
+### Volume
+
++ Add support for labels on volumes [#21270](https://github.com/docker/docker/pull/21270)
++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628)
+* Add a `--force` flag in `docker volume rm` to forcefully purge the data of a volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436)
+* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671)
+* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329)
+
+### Security
+
+- Fix SELinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024)
+- Prohibit `/sys/firmware/**` from being accessed with AppArmor [#26618](https://github.com/docker/docker/pull/26618)
+
+### DEPRECATION
+
+- Mark the `docker daemon` command as deprecated. The daemon has moved to a separate binary (`dockerd`), which should be used instead [#26834](https://github.com/docker/docker/pull/26834)
+- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208)
+- Remove Ubuntu 15.10 (Wily Werewolf) as a supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042)
+- Remove Fedora 22 as a supported platform. Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432)
+- Remove Fedora 23 as a supported platform. Fedora 23 is EOL, and no longer receives updates [#29455](https://github.com/docker/docker/pull/29455)
+- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207)
+- Deprecate backing filesystem without `d_type` for overlay and overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433)
+- Deprecate `MAINTAINER` in Dockerfile [#25466](https://github.com/docker/docker/pull/25466)
+- Deprecate `filter` param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872)
+- Deprecate setting duplicate engine labels [#24533](https://github.com/docker/docker/pull/24533)
+- Deprecate "top-level" network information in `NetworkSettings` [#28437](https://github.com/docker/docker/pull/28437)
+
+## 1.12.6 (2017-01-10)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or
+that the IPAM driver can provide one when you specify the `--ipv6` option.
+
+If you are currently using the `--ipv6` option _without_ specifying the
+`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
+following message:
+
+```none
+Error starting daemon: Error initializing network controller: Error creating
+    default "bridge" network: failed to parse pool request
+    for address space "LocalDefault" pool " subpool ":
+    could not find an available, non-overlapping IPv6 address
+    pool among the defaults to assign to the network
+```
+
+To resolve this error, either remove the `--ipv6` flag (to preserve the same
+behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the
+value of the `--fixed-cidr-v6` flag.
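+
+For example, a daemon invocation along these lines satisfies the check (the
+subnet below is from the IPv6 documentation range and is only an illustration):
+
+```bash
+dockerd --ipv6 --fixed-cidr-v6 2001:db8:1::/64
+```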
+
+In a similar way, if you specify the `--ipv6` flag when creating a network
+with the default IPAM driver, without providing an IPv6 `--subnet`, network
+creation will fail with the following message:
+
+```none
+Error response from daemon: failed to parse pool request for address space
+    "LocalDefault" pool "" subpool "": could not find an
+    available, non-overlapping IPv6 address pool among
+    the defaults to assign to the network
+```
+
+To resolve this, either remove the `--ipv6` flag (to preserve the same behavior
+as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the
+`--subnet` flag.
+
+Network creation will instead succeed if you use an external IPAM driver
+which supports automatic allocation of IPv6 subnets.
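+
+As an illustrative sketch (again using a documentation-range subnet, and an
+arbitrary network name):
+
+```bash
+docker network create --ipv6 --subnet 2001:db8:2::/64 my-ipv6-net
+```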
+
+### Runtime
+
+- Fix runC privilege escalation (CVE-2016-9962)
+
+## 1.12.5 (2016-12-15)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+**NOTE**: Docker 1.12.5 will correctly validate that either an IPv6 subnet is provided or
+that the IPAM driver can provide one when you specify the `--ipv6` option.
+
+If you are currently using the `--ipv6` option _without_ specifying the
+`--fixed-cidr-v6` option, the Docker daemon will refuse to start with the
+following message:
+
+```none
+Error starting daemon: Error initializing network controller: Error creating
+    default "bridge" network: failed to parse pool request
+    for address space "LocalDefault" pool " subpool ":
+    could not find an available, non-overlapping IPv6 address
+    pool among the defaults to assign to the network
+```
+
+To resolve this error, either remove the `--ipv6` flag (to preserve the same
+behavior as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the
+value of the `--fixed-cidr-v6` flag.
+
+In a similar way, if you specify the `--ipv6` flag when creating a network
+with the default IPAM driver, without providing an IPv6 `--subnet`, network
+creation will fail with the following message:
+
+```none
+Error response from daemon: failed to parse pool request for address space
+    "LocalDefault" pool "" subpool "": could not find an
+    available, non-overlapping IPv6 address pool among
+    the defaults to assign to the network
+```
+
+To resolve this, either remove the `--ipv6` flag (to preserve the same behavior
+as in Docker 1.12.3 and earlier), or provide an IPv6 subnet as the value of the
+`--subnet` flag.
+
+Network creation will instead succeed if you use an external IPAM driver
+which supports automatic allocation of IPv6 subnets.
+
+### Runtime
+
+- Fix race on sending stdin close event [#29424](https://github.com/docker/docker/pull/29424)
+
+### Networking
+
+- Fix panic in `docker network ls` when a network was created with `--ipv6` and no IPv6 `--subnet` in older docker versions [#29416](https://github.com/docker/docker/pull/29416)
+
+### Contrib
+
+- Fix compilation on Darwin [#29370](https://github.com/docker/docker/pull/29370)
+
+## 1.12.4 (2016-12-12)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
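+
+A minimal recovery sketch for the unit-file case above (rpm paths as in the
+note; drop-in files, if any, still need to be edited by hand):
+
+```bash
+# back up the locally-modified unit file
+sudo cp /usr/lib/systemd/system/docker.service /usr/lib/systemd/system/docker.service.bak
+# replace it with the unit file that ships with docker 1.12
+sudo curl -o /usr/lib/systemd/system/docker.service \
+    https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm
+sudo systemctl daemon-reload
+sudo systemctl restart docker
+```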
+
+
+### Runtime
+
+- Fix issue where volume metadata was not removed [#29083](https://github.com/docker/docker/pull/29083)
+- Asynchronously close streams to prevent holding container lock [#29050](https://github.com/docker/docker/pull/29050)
+- Fix SELinux labels for newly created container volumes [#29050](https://github.com/docker/docker/pull/29050)
+- Remove hostname validation [#28990](https://github.com/docker/docker/pull/28990)
+- Fix deadlocks caused by IO races [#29095](https://github.com/docker/docker/pull/29095) [#29141](https://github.com/docker/docker/pull/29141)
+- Return empty stats if the container is restarting [#29150](https://github.com/docker/docker/pull/29150)
+- Fix volume store locking [#29151](https://github.com/docker/docker/pull/29151)
+- Ensure consistent status code in API [#29150](https://github.com/docker/docker/pull/29150)
+- Fix incorrect opaque directory permission in overlay2 [#29093](https://github.com/docker/docker/pull/29093)
+- Detect plugin content and error out on `docker pull` [#29297](https://github.com/docker/docker/pull/29297)
+
+### Swarm Mode
+
+* Update Swarmkit [#29047](https://github.com/docker/docker/pull/29047)
+  - orchestrator/global: Fix deadlock on updates [docker/swarmkit#1760](https://github.com/docker/swarmkit/pull/1760)
+  - On leader switchover, preserve the vxlan id for existing networks [docker/swarmkit#1773](https://github.com/docker/swarmkit/pull/1773)
+- Refuse swarm spec not named "default" [#29152](https://github.com/docker/docker/pull/29152)
+
+### Networking
+
+* Update libnetwork [#29004](https://github.com/docker/docker/pull/29004) [#29146](https://github.com/docker/docker/pull/29146)
+  - Fix panic in embedded DNS [docker/libnetwork#1561](https://github.com/docker/libnetwork/pull/1561)
+  - Fix unmarshalling panic when passing `--link-local-ip` on global scope network [docker/libnetwork#1564](https://github.com/docker/libnetwork/pull/1564)
+  - Fix panic when network plugin returns nil StaticRoutes [docker/libnetwork#1563](https://github.com/docker/libnetwork/pull/1563)
+  - Fix panic in `osl.(*networkNamespace).DeleteNeighbor` [docker/libnetwork#1555](https://github.com/docker/libnetwork/pull/1555)
+  - Fix panic in swarm networking concurrent map read/write [docker/libnetwork#1570](https://github.com/docker/libnetwork/pull/1570)
+  * Allow encrypted networks when running docker inside a container [docker/libnetwork#1502](https://github.com/docker/libnetwork/pull/1502)
+  - Do not block autoallocation of IPv6 pool [docker/libnetwork#1538](https://github.com/docker/libnetwork/pull/1538)
+  - Set timeout for netlink calls [docker/libnetwork#1557](https://github.com/docker/libnetwork/pull/1557)
+  - Increase networking local store timeout to one minute [docker/libkv#140](https://github.com/docker/libkv/pull/140)
+  - Fix a panic in `libnetwork.(*sandbox).execFunc` [docker/libnetwork#1556](https://github.com/docker/libnetwork/pull/1556)
+  - Honor icc=false for internal networks [docker/libnetwork#1525](https://github.com/docker/libnetwork/pull/1525)
+
+### Logging
+
+* Update syslog log driver [#29150](https://github.com/docker/docker/pull/29150)
+
+### Contrib
+
+- Run "dnf upgrade" before installing in Fedora [#29150](https://github.com/docker/docker/pull/29150)
+- Add build-date back to RPM packages [#29150](https://github.com/docker/docker/pull/29150)
+- Change the deb package filename to include the distro, to distinguish between distro code names [#27829](https://github.com/docker/docker/pull/27829)
+
+## 1.12.3 (2016-10-26)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+
+### Runtime
+
+- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610)
+- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136)
+- Fix error reporting in `CopyFileWithTar` [#27075](https://github.com/docker/docker/pull/27075)
+* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387)
+* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609)
+- Fix `docker exec` [#27610](https://github.com/docker/docker/pull/27610)
+- Fix backward compatibility with containerd’s events log [#27693](https://github.com/docker/docker/pull/27693)
+
+### Swarm Mode
+
+- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062)
+* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554)
+  * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305)
+  * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632)
+  * Allow multiple randomly assigned published ports on service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657)
+  - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651)
+
+### Networking
+
+* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559)
+  - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495)
+  - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503)
+  * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504)
+  - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507)
+  - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512)
+
+### Logging
+
+* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474)
+
+### Contrib
+
+* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327)
+* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421)
+
+## 1.12.2 (2016-10-11)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+
+### Runtime
+
+- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049)
+* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536)
+* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740)
+- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777)
+- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507)
+- Fix handling of exec's children [#26874](https://github.com/docker/docker/pull/26874)
+- Fix exec form of `HEALTHCHECK CMD` [#26208](https://github.com/docker/docker/pull/26208)
+
+### Networking
+
+- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315)
+* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953)
+  * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473)
+  - Fix service published port cleanup issues [docker/libnetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433)
+  * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446)
+  * Disambiguate node names known to the gossip cluster to avoid node name collision [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451)
+  * Honor user-provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460)
+  * Allow reachability via published port across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398)
+  * Change the ingress sandbox name from a random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449)
+  - Disable service discovery in ingress network [docker/libnetwork#1489](https://github.com/docker/libnetwork/pull/1489)
+
+### Swarm Mode
+
+* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211)
+* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765)
+  * Bounce session after failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539)
+  - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537)
+  - Fix panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481)
+  * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495)
+  - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497)
+  - Do not allow service creation on ingress network [docker/swarmkit#1600](https://github.com/docker/swarmkit/pull/1600)
+
+### Contrib
+
+* Update the Debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869)
+* Improve stability when running the docker client on macOS Sierra [#26875](https://github.com/docker/docker/pull/26875)
+- Fix installation on Debian Stretch [#27184](https://github.com/docker/docker/pull/27184)
+
+### Windows
+
+- Fix an issue where arrow navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578)
+
+## 1.12.1 (2016-08-18)
+
+**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+
+### Client
+
+* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512)
+- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454)
+- Fix an issue preventing `service update --env-add` from working as intended [#25427](https://github.com/docker/docker/pull/25427)
+- Fix an issue preventing `service update --publish-add` from working as intended [#25428](https://github.com/docker/docker/pull/25428)
+- Remove `service update --network-add` and `service update --network-rm` flags
+  because this feature is not yet implemented in 1.12, but was inadvertently added
+  to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646)
+
+### Contrib
+
++ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25591](https://github.com/docker/docker/pull/25637)
+- Add SELinux policy per distro/version, fixing an issue preventing successful installation on Fedora 24 and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593)
+
+### Networking
+
+- Fix an issue that prevented containers from being accessed by hostname with the Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648)
+- Fix random network issues on a service with a published port [#25603](https://github.com/docker/docker/pull/25603)
+- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603)
+- Fix an issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603)
+- Fix an issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550)
+- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739)
+
+### Plugins (experimental)
+
+* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760)
+* Check for plugin state before enabling plugin [#25033](https://github.com/docker/docker/pull/25033)
+- Remove plugin root from filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187)
+- Prevent deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384)
+
+### Runtime
+
+* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346)
+- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387)
+- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053)
+
+### Security
+
+* Add `/proc/timer_list` to the masked paths list to prevent information leak from the host [#25630](https://github.com/docker/docker/pull/25630)
+* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disabling seccomp filtering [#25567](https://github.com/docker/docker/pull/25567)
+
+### Swarm
+
+- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055)
+- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017)
+- Change swarm root CA key to P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376)
+- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159)
+- Fix connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277)
+- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243)
+- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287)
+- Fix issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295)
+- Garbage collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327)
+
+### Volume
+
+- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316)
+- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333)
+- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309)
+- `docker service create --mount type=bind,...` now correctly validates if the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494)
+
+## 1.12.0 (2016-07-28)
+
+
+**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm-based
+installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
+upgrading from an older version of docker, the upgrade process may not
+automatically install the updated version of the unit file, or may fail to
+start the docker service if:
+
+- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
+- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
+
+Starting the docker service will produce an error:
+
+    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
+
+or
+
+    no sockets found via socket activation: make sure the service was started by systemd.
+
+To resolve this:
+
+- Back up the current version of the unit file, and replace the file with the
+  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
+- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present
+- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
+
+After making those changes, run `sudo systemctl daemon-reload`, and `sudo
+systemctl restart docker` to reload changes and (re)start the docker daemon.
+
+**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two
+additional binaries: `dockerd` and `docker-proxy`. If you have scripts for
+installing docker, please make sure to update them accordingly.
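+
+A quick, illustrative sanity check that both binaries are present on `$PATH`
+after upgrading:
+
+```bash
+command -v dockerd docker-proxy
+```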
+
+### Builder
+
++ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218)
++ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489)
++ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in Dockerfile [#22268](https://github.com/docker/docker/pull/22268)
++ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111)
+* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372)
+* Skip UTF-8 BOM bytes from `Dockerfile` and `.dockerignore` if they exist [#23234](https://github.com/docker/docker/pull/23234)
+* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508)
+- Fix error message when building using a daemon with the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932)
+
+### Contrib
+
+* Enable seccomp for CentOS 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344)
+- Remove MountFlags in systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806)
+
+### Distribution
+
++ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445)
+* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316)
+* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377)
+* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996)
+
+### Logging
+
++ Syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613)
++ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889)
++ Enable syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724)
++ Add an additional syslog-format option `rfc5424micro` to allow microsecond resolution in syslog timestamps [#21844](https://github.com/docker/docker/pull/21844)
+* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153)
+* Remove `docker/` prefix from log messages tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384)
+
+### Networking
+
++ Built-in Virtual-IP based internal and ingress load-balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361)
++ Routing Mesh using ingress overlay network [#23361](https://github.com/docker/docker/pull/23361)
++ Secured multi-host overlay networking using encrypted control plane and data plane [#23361](https://github.com/docker/docker/pull/23361)
++ Macvlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524)
++ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319)
++ Add `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300)
++ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415)
++ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495)
+* Removed dependency on external KV-Store for overlay networking in Swarm Mode [#23361](https://github.com/docker/docker/pull/23361)
+* Add container's short-id as default network alias [#21901](https://github.com/docker/docker/pull/21901)
+* `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408)
+- Fix DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716)
+- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226)
+
+### Plugins (experimental)
+
++ New `plugin` command to manage plugins with `install`, `enable`, `disable`, `rm`, `inspect`, `set` subcommands [#23446](https://github.com/docker/docker/pull/23446)
+
+### Remote API (v1.24) & Client
+
++ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639)
++ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908)
++ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107)
++ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369)
++ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520)
++ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410)
++ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556)
++ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
+* API now returns a JSON object when an error occurs, making it more consistent [#22880](https://github.com/docker/docker/pull/22880)
+- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777)
+- Fix API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641)
+- Fix discrepancy in the format of sizes in `stats` from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773)
+- authz: when a request is denied, return a forbidden status code (403) [#22448](https://github.com/docker/docker/pull/22448)
+- Windows: fix tty-related displaying issues [#23878](https://github.com/docker/docker/pull/23878)
+
+### Runtime
+
++ Split the userland proxy to a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312)
++ Add `--live-restore` daemon flag to keep containers running when daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213)
++ Ability to add OCI-compatible runtimes (via `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983)
++ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126)
++ New load/save image events [#22137](https://github.com/docker/docker/pull/22137)
++ Add support for reloading daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446)
++ Add disk quota support for btrfs [#19651](https://github.com/docker/docker/pull/19651)
++ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946)
++ Add support for `docker run --pid=container:` [#22481](https://github.com/docker/docker/pull/22481)
++ Align default seccomp profile with selected capabilities [#22554](https://github.com/docker/docker/pull/22554)
++ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590)
++ Add `trace` capability in the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715)
++ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898)
++ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265)
++ Add `--storage-opt` flag to `create` and `run`, allowing `size` to be set on devicemapper [#19367](https://github.com/docker/docker/pull/19367)
++ Add `--oom-score-adjust` daemon flag with a default value of `-500`, making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516)
+* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621)
+* Prevent using the aufs and overlay graphdrivers on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121)
+- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329)
+- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947)
+- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423)
+- Fix bug that was returning an HTTP 500 instead of a 400 when not specifying a command on run/create [#22762](https://github.com/docker/docker/pull/22762)
+- Fix bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943)
+- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993)
+- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993)
+- Fix inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438)
+- Fix an issue where daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148)
+- Ignore SIGPIPE events to prevent journald restarts from crashing docker in some cases [#22460](https://github.com/docker/docker/pull/22460)
+- Containers are not removed from stats list on error [#20835](https://github.com/docker/docker/pull/20835)
+- Fix `on-failure` restart policy when daemon restarts [#20853](https://github.com/docker/docker/pull/20853)
+- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904)
+
+### Swarm Mode
+
++ New `swarm` command to manage swarms with `init`, `join`, `join-token`, `leave`, `update` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#24823](https://github.com/docker/docker/pull/24823)
++ New `service` command to manage swarm-wide services with `create`, `inspect`, `update`, `rm`, `ps` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140)
++ New `node` command to manage nodes with `accept`, `promote`, `demote`, `inspect`, `update`, `ps`, `ls` and `rm` subcommands [#23361](https://github.com/docker/docker/pull/23361) [#25140](https://github.com/docker/docker/pull/25140)
+
+### Volume
+
++ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077)
++ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006)
++ Add name/driver filter support for volumes (see the sketch after this section) [#21361](https://github.com/docker/docker/pull/21361)
+* Mount/Unmount operations now receive an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015)
+- Fix an issue preventing a volume from being removed in a corner case [#22103](https://github.com/docker/docker/pull/22103)
+- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094)
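+
+A minimal sketch of the volume filters (filter values are illustrative):
+
+```bash
+# list local-driver volumes whose name contains "data"
+docker volume ls --filter driver=local --filter name=data
+```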
+
+### DEPRECATION
+* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed
+  to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574)
+* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log options in favor of the more generic `tag` one [#22620](https://github.com/docker/docker/pull/22620)
+* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570)
+* Remove deprecated `-f`/`--force` flag on docker tag [#23090](https://github.com/docker/docker/pull/23090)
+* Remove deprecated `/containers//copy` endpoint [#22149](https://github.com/docker/docker/pull/22149)
+* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138)
+* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273)
+
+## 1.11.2 (2016-05-31)
+
+### Networking
+
+- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015))
+- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997))
+
+### Runtime
+
+- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032))
+- Fix interpretation of `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998))
+- Fix a bug preventing container statistics from being correctly reported ([#22955](https://github.com/docker/docker/pull/22955))
+- Fix an issue preventing containers from being restarted after daemon restart ([#22947](https://github.com/docker/docker/pull/22947))
+- Fix issues when running 32-bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922))
+- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918))
+- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561))
+- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+- Fix a bug preventing `docker exec` into a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067))
+
+## 1.11.1 (2016-04-26)
+
+### Distribution
+
+- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949))
+
+### Documentation
+
++ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048))
+
+### Builder
+
+* Labels passed to `docker build` as arguments are now appended as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184))
+
+### Networking
+
+- Fix a panic that would occur when forwarding DNS queries ([#22261](https://github.com/docker/docker/pull/22261))
+- Fix an issue where OS threads could end up within an incorrect network namespace when using user-defined networks ([#22261](https://github.com/docker/docker/pull/22261))
+
+### Runtime
+
+- Fix a bug preventing the labels configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299))
+- Fix a regression where a container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256))
+- Fix an issue where it would be impossible to update both `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255))
+- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254))
+- Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237))
+- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993))
+- Fix a panic that would occur when the plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191))
+- Fix restart backoff logic to correctly reset delay if the container ran for at least 10 seconds ([#22125](https://github.com/docker/docker/pull/22125))
+- Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123))
+- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121))
+- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120))
+- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065))
+- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058))
+
+## 1.11.0 (2016-04-13)
+
+**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binary, please make sure to update them. Otherwise, interaction with the daemon stays the same, and the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`.
+
+### Builder
+
+- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033))
+- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162))
+
+### Client
+
+* Usage of the `:` separator for security options has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232))
++ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373))
+* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200))
+* Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128))
+- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803))
+- Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792))
+* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587))
+- Values passed to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566))
++ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373))
++ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107))
+* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017))
+* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986))
+- Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806))
+* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761))
+* `docker ps` no longer shows exported ports for stopped containers ([#19483](https://github.com/docker/docker/pull/19483))
+- Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849))
+* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078))
+
+### Distribution
+
+- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222))
+- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212))
++ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046))
++ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970))
+* `docker login` now handles tokens using the implementation found in [docker/distribution](https://github.com/docker/distribution) ([#20832](https://github.com/docker/docker/pull/20832))
+* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565))
+* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241))
+* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840))
+- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509))
+- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382))
+
+### Logging
+
+- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311))
+* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121))
+* Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options (see the sketch after this section) ([#19831](https://github.com/docker/docker/pull/19831))
+* Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794))
++ Docker, on Windows platform, now includes an ETW (Event Tracing for Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689))
+* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564))
++ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439))
++ Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver. ([#18766](https://github.com/docker/docker/pull/18766))
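+
+A minimal sketch of the GELF compression options (the endpoint address and image are illustrative, not part of the upstream changelog):
+
+```bash
+# ship container logs to a GELF endpoint with compression tuning
+docker run -d \
+  --log-driver=gelf \
+  --log-opt gelf-address=udp://graylog.example.com:12201 \
+  --log-opt gelf-compression-type=gzip \
+  --log-opt gelf-compression-level=6 \
+  nginx
+```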
+
+### Misc
+
++ When saving linked images together with `docker save`, a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385))
++ Support for building the Docker CLI for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325))
++ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270))
+* The `dockremap` user is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266))
+- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258))
+- Docker, when run as a service with systemd, will now properly manage its processes' cgroups ([#20633](https://github.com/docker/docker/pull/20633))
+* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863))
+* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388))
+* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894))
+* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490), [#19851](https://github.com/docker/docker/pull/19851))
++ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013))
++ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348))
+
+### Networking
+
+- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671))
+- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671))
+* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160))
++ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122))
+* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383))
+- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431))
+* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357))
++ Control IPv6 via explicit option when creating a network (`docker network create --ipv6`). This shows up as a new `EnableIPv6` field in `docker network inspect` (see the sketch after this section) ([#17513](https://github.com/docker/docker/pull/17513))
+* Support for AAAA Records (aka IPv6 Service Discovery) in embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396))
+- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396))
+* Multiple A/AAAA records from embedded DNS Server for DNS Round robin ([#21019](https://github.com/docker/docker/pull/21019))
+- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261))
+- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed a bug which prevented docker reload when the host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019))
+- Added built-in nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019)
+- Fixed a Veth interface leak when using overlay network ([#21019](https://github.com/docker/docker/pull/21019))
+- Fixed a bug which prevented docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214))
+- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419))
+- Allow passing the global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419))
+- For anonymous containers, use the alias name for IP-to-name mapping, i.e. the DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019))
+- Fix a panic when deleting an entry from the /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019))
+- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019))
+- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780))
+- Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914))
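+
+A minimal sketch of the IPv6 option mentioned above (network name and subnet are illustrative; the inspect template assumes the `--format` flag):
+
+```bash
+# create an IPv6-enabled network with an explicit v6 subnet
+docker network create --ipv6 --subnet 2001:db8:1::/64 v6net
+# the new EnableIPv6 field shows up in network inspect
+docker network inspect --format '{{.EnableIPv6}}' v6net
+```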
+
+### Plugins
+
+- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686))
+- Fix an issue where the Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602))
+
+### Runtime
+
+- Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716))
+- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692))
+- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677))
+- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in Docker 1.9, but was deemed too much of a backward-incompatible change, so the feature was kept ([#21666](https://github.com/docker/docker/pull/21666))
++ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383))
++ `docker inspect ` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370))
++ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354))
+* Docker learned to report the faulty exe when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345))
+* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097))
+- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089))
+- Fix a hang that would happen on attach if the initial start failed ([#21048](https://github.com/docker/docker/pull/21048))
+- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045))
+- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022))
+- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013))
+- Fix the handling of the Docker command when passed a 64-byte id ([#21002](https://github.com/docker/docker/pull/21002))
+* Docker will now return a `204` (i.e. http.StatusNoContent) code when it successfully deletes a network ([#20977](https://github.com/docker/docker/pull/20977))
+- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967))
+* The devmapper driver learned the `dm.min_free_space` option. If the mapped device free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786))
++ Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag (see the sketch at the end of this section) ([#20727](https://github.com/docker/docker/pull/20727))
+- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684))
++ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662))
+- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604))
++ Docker now allows setting a container hostname via the `--hostname` flag when using `--net=host` ([#20177](https://github.com/docker/docker/pull/20177))
++ Docker now allows executing privileged containers while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111))
+- Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679))
+* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517))
+- Fix container loading, on daemon startup, when they depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500))
+* `docker update` learned how to change a container's restart policy ([#19116](https://github.com/docker/docker/pull/19116))
+* `docker inspect` now also returns a new `State` field containing the container state in a human readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`) ([#18966](https://github.com/docker/docker/pull/18966))
++ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flag. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697))
+- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078))
+- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842))
+- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802))
+- Fix a situation where a container cannot be stopped if the terminal is closed ([#21840](https://github.com/docker/docker/pull/21840))
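+
+A minimal sketch of the new privilege restriction mentioned above (image and command are illustrative):
+
+```bash
+# processes in this container cannot gain privileges, e.g. via setuid binaries
+docker run --rm --security-opt=no-new-privileges busybox id
+```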
+
+### Security
+
+* Objects with the `pcp_pmcd_t` selinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370))
+* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262))
+* `send`, `recv` and `x32` were added to the list of allowed syscalls and arch in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432))
+* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046))
+* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591))
+
+### Volumes
+
+* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389))
+* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262))
+- Fix an issue where a one-letter directory name could not be used as source for volumes ([#21106](https://github.com/docker/docker/pull/21106))
++ `docker run -v` now accepts a new flag `nocopy`. This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223))
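+
+A minimal sketch of the `nocopy` mode (volume and image names are illustrative):
+
+```bash
+# mount a named volume without copying the image's existing
+# content at that path into the volume
+docker run --rm -v webdata:/usr/share/nginx/html:nocopy nginx \
+  ls /usr/share/nginx/html
+```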
+
+## 1.10.3 (2016-03-10)
+
+### Runtime
+
+- Fix Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706)
+- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647)
+
+### Distribution
+
+- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831)
+- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030)
+
+### Plugin system
+
+- Fix issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834)
+- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682)
+- Fix plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680)
+
+### Security
+
+- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672)
+  It was due to the `personality` syscall being blocked by the default seccomp profile.
+- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981)
+  It was due to the `ipc` syscall being blocked by the default seccomp profile.
+- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685)
+- Fix issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725)
+
+## 1.10.2 (2016-02-22)
+
+### Runtime
+
+- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518)
+- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333)
+- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446)
+- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471)
+- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522)
+
+### Distribution
+
+- Keep layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513)
+- Gracefully handle a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372)
+- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367)
+- Fix tar-split file corruption during migration that later caused docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458)
+
+### Networking
+
+- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510)
+
+### Volumes
+
+- Fix issue with multiple volume references with the same name [#20381](https://github.com/docker/docker/pull/20381)
+
+### Security
+
+- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523)
+
+## 1.10.1 (2016-02-11)
+
+### Runtime
+
+* Do not stop daemon on migration hard failure [#20156](https://github.com/docker/docker/pull/20156)
+- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058)
+- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045)
+- Do not leak /dev/mqueue from the host to all containers, keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133)
+- Fix `docker ps --filter before=...` to not show stopped containers without providing `-a` flag [#20135](https://github.com/docker/docker/pull/20135)
+
+### Security
+
+- Fix issue preventing docker events from working properly with the authorization plugin [#20002](https://github.com/docker/docker/pull/20002)
+
+### Distribution
+
+* Add additional verifications and prevent uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164)
+- Fix regression preventing uppercase characters in image reference hostname [#20175](https://github.com/docker/docker/pull/20175)
+
+### Networking
+
+- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060)
+- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235)
+- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181)
+- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181)
+
+### Logging
+
+- Fix bug where tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109)
+
+### Volumes
+
+- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983)
+
+### Misc
+
+- Remove TasksMax from Docker systemd service [#20167](https://github.com/docker/docker/pull/20167)
+
+## 1.10.0 (2016-02-04)
+
+**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers.
+A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present.
+Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime.
+Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/
+
+### Runtime
+
++ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078)
++ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587)
++ Add `--format` flag to `docker images` command [#17692](https://github.com/docker/docker/pull/17692)
++ Allow setting daemon configuration in a file and hot-reloading it with the `SIGHUP` signal (see the sketch at the end of this section) [#18587](https://github.com/docker/docker/pull/18587)
++ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888)
+  This change is backward compatible in the API, but not on the CLI.
++ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959)
++ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466)
++ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879)
++ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277)
++ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666)
++ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168)
++ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249)
++ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478)
++ Add `--cgroup-parent` flag on `daemon` to set cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062)
++ Add `-L` flag to docker cp to follow symlinks [#16613](https://github.com/docker/docker/pull/16613)
++ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908)
+* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012)
+* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495)
+* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742)
+* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704)
+* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115)
+* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208)
+* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780)
+* Permit `OPTIONS` requests against any URL, thus fixing an issue with CORS [#19569](https://github.com/docker/docker/pull/19569)
+- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428)
+- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326)
+- Fix race condition causing autorestart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629)
+- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216)
+- Fix an obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751)
+- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722)
+- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123)
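+
+A minimal sketch of the file-based configuration with hot reload (the config path, the `labels` value, and the pid-file location are illustrative assumptions):
+
+```bash
+# write a reloadable option to the daemon config file
+echo '{"labels": ["env=staging"]}' | sudo tee /etc/docker/daemon.json
+# ask the running daemon to re-read its configuration without a restart
+sudo kill -HUP "$(cat /var/run/docker.pid)"
+```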
+
+### Security
+
++ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187)
++ Add support for custom seccomp profiles in `--security-opt` (see the sketch after this section) [#17989](https://github.com/docker/docker/pull/17989)
++ Add default seccomp profile [#18780](https://github.com/docker/docker/pull/18780)
++ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365)
++ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887)
+  This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled.
+  Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled.
+* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452)
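+
+A minimal sketch of a custom seccomp profile (the profile path and image are illustrative; Docker 1.10 still used the `:` separator for `--security-opt`, with `=` becoming the preferred form in 1.11):
+
+```bash
+# start a container constrained by a custom seccomp profile
+docker run --rm --security-opt seccomp:/etc/docker/custom-seccomp.json busybox true
+```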
+
+### Distribution
+
+* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924)
+  Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present.
+  Images no longer depend on the parent chain but contain a list of layer references.
+  `docker load`/`docker save` tarballs now also contain content-addressable image configurations.
+  For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
+* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785)
+* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109)
+* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590)
+- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743)
+
+### Networking
+
++ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198)
++ Support for network-scoped alias using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242)
++ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network (see the sketch after this section) [#19001](https://github.com/docker/docker/pull/19001)
++ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316)
++ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276)
++ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167)
++ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204)
++ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481)
++ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229)
++ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775)
++ Add `--force` to `network disconnect` to force a container to be disconnected from a network [#19317](https://github.com/docker/docker/pull/19317)
+* Support for multi-host networking using the built-in overlay driver for all engine-supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775)
+* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229)
+* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489)
+* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615)
+* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316)
+* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782)
+* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906)
+* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323)
+- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108)
+- Fix duplicate IP address for containers [#18106](https://github.com/docker/docker/pull/18106)
+- Fix an issue sometimes preventing docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338)
+- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573)
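+
+A minimal sketch of the static-IP and alias options (network name, subnet, addresses, and images are illustrative):
+
+```bash
+# user-defined network with a fixed subnet
+docker network create --subnet 172.25.0.0/16 appnet
+# attach a container with a static IP and a network-scoped alias
+docker run -d --net appnet --ip 172.25.0.10 --net-alias db redis
+```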
+
+### Logging
+
++ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488)
++ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998)
+* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495)
+* Enhance AWS logs to auto-detect region [#16640](https://github.com/docker/docker/pull/16640)
+
+### Volumes
+
++ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034)
+* Add `ls` and `inspect` endpoints to the volume plugin API [#16534](https://github.com/docker/docker/pull/16534)
+  Existing plugins need to make use of these new APIs to satisfy users' expectations.
+  For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549)
+- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175)
+- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500)
+- Fix `docker volume ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671)
+- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568)
+- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190)
+
+### Builder
+
++ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090)
+- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055)
+- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283)
+
+### Client
+
++ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable (see the sketch below) [#15964](https://github.com/docker/docker/pull/15964)
+- Fix a bug preventing Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891)
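+
+A minimal sketch of the API version override (the version value is illustrative):
+
+```bash
+# pin the client to an older API version, e.g. to talk to an older daemon
+DOCKER_API_VERSION=1.21 docker version
+```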
+
+### Misc
+
+* systemd: Set TasksMax in addition to LimitNPROC in systemd service file [#19391](https://github.com/docker/docker/pull/19391)
+
+### Deprecations
+
+* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700)
+* Remove `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700)
+* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724)
+* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799)
+* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809)
+* Deprecate `-f` flag for docker tag [#18350](https://github.com/docker/docker/pull/18350)
+
+## 1.9.1 (2015-11-21)
+
+### Runtime
+
+- Do not prevent daemon from booting if images could not be restored (#17695)
+- Force IPC mount to unmount on daemon shutdown/init (#17539)
+- Turn IPC unmount errors into warnings (#17554)
+- Fix `docker stats` performance regression (#17638)
+- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767)
+- Fix rare panics (#17639, #17634, #17703)
+- Fix opaque (opq) whiteout problems for files with a dot prefix (#17819)
+- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918)
+- devicemapper: fix displayed fs in docker info (#17974)
+- selinux: only relabel if user requested so with the `z` option (#17450, #17834)
+- Do not make network calls when normalizing names (#18014)
+
+### Client
+
+- Fix `docker login` on Windows (#17738)
+- Fix bug with `docker inspect` output when not connected to daemon (#17715)
+- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680)
+
+### Builder
+
+- Fix regression with symlink behavior in ADD/COPY (#17710)
+
+### Networking
+
+- Allow passing a network ID as an argument for `--net` (#17558)
+- Fix connect to host and prevent disconnect from host for `host` network (#17476)
+- Fix `--fixed-cidr` issue when the gateway IP falls in the IP range and the IP range is not the first block in the network (#17853)
+- Restore deterministic `IPv6` generation from `MAC` address on default `bridge` network (#17890)
+- Allow port-mapping only for endpoints created on docker run (#17858)
+- Fixed an endpoint delete issue with a possible stale sbox (#18102)
+
+### Distribution
+
+- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047)
+
+## 1.9.0 (2015-11-03)
+
+### Runtime
+
++ `docker stats` now returns block IO metrics (#15005)
++ `docker stats` now details network stats per interface (#15786)
++ Add `ancestor=` filter to `docker ps --filter` flag to filter containers based on their ancestor images (#14570)
++ Add `label=` filter to `docker ps --filter` to filter containers based on label (#16530)
++ Add `--kernel-memory` flag to `docker run` (#14006)
++ Add `--message` flag to `docker import`, allowing an optional message to be specified (#15711)
++ Add `--privileged` flag to `docker exec` (#14113)
++ Add `--stop-signal` flag to `docker run`, allowing the signal that stops the container process to be replaced (#15307)
++ Add a new `unless-stopped` restart policy (see the sketch at the end of this section) (#15348)
++ Inspecting an image now returns tags (#13185)
++ Add container size information to `docker inspect` (#15796)
++ Add `RepoTags` and `RepoDigests` fields to `/images/{name:.*}/json` (#17275)
+- Remove the deprecated `/container/ps` endpoint from the API (#15972)
+- Send and document correct HTTP codes for `/exec//start` (#16250)
+- Share shm and mqueue between containers sharing IPC namespace (#15862)
+- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235)
+- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted with `ro` option (#14965)
+- Improve `rmi` performance (#16890)
+- Do not update /etc/hosts for the default bridge network, except for links (#17325)
+- Fix conflict with duplicate container names (#17389)
+- Fix an issue with incorrect template execution in `docker inspect` (#17284)
+- DEPRECATE `-c` short flag variant for `--cpu-shares` in docker run (#16271)
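+
+A minimal sketch of the new restart policy (image is illustrative):
+
+```bash
+# restart automatically with the daemon, unless the container was manually stopped
+docker run -d --restart unless-stopped nginx
+```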
+
+### Client
+
++ Allow `docker import` to import from local files (#11907)
+
+### Builder
+
++ Add a `STOPSIGNAL` Dockerfile instruction, allowing a different stop signal to be set for the container process (#15307)
++ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` that allow adding build-time environment variables (see the sketch below) (#15182)
+- Improve cache miss performance (#16890)
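+
+A minimal sketch of `ARG`/`--build-arg` and `STOPSIGNAL` together (file contents, tag, and values are illustrative):
+
+```bash
+# write a Dockerfile that declares a build-time variable and a stop signal
+cat > Dockerfile <<'EOF'
+FROM busybox
+ARG APP_VERSION=1.0
+RUN echo "building version $APP_VERSION"
+STOPSIGNAL SIGTERM
+EOF
+# override the build-time variable from the command line
+docker build --build-arg APP_VERSION=2.0 -t demo:latest .
+```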
+
+### Storage
+
+- devicemapper: Implement deferred deletion capability (#16381)
+
+### Networking
+
++ `docker network` exits experimental and is part of standard release (#16645)
++ New network top-level concept, with associated subcommands and API (#16645)
+  WARNING: the API is different from the experimental API
++ Support for multiple isolated/micro-segmented networks (#16645)
++ Built-in multihost networking using a VXLAN-based overlay driver (#14071)
++ Support for third-party network plugins (#13424)
++ Ability to dynamically connect containers to multiple networks (#16645)
++ Support for user-defined IP address management via pluggable IPAM drivers (#16910)
++ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in node discovery (#16229)
++ Add `--cluster-store-opt` for setting up TLS settings (#16644)
++ Add `--dns-opt` to the daemon (#16031)
+- DEPRECATE the following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`.
+  Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect the networking settings of a container per network.
+
+### Volumes
+
++ New top-level `volume` subcommand and API (#14242)
+- Move API volume driver settings to host-specific config (#15798)
+- Print an error message if volume name is not unique (#16009)
+- Ensure volumes created from Dockerfiles always use the local volume driver (#15507)
+- DEPRECATE auto-creating missing host paths for bind mounts (#16349)
+
+### Logging
+
++ Add `awslogs` logging driver for Amazon CloudWatch (#15495)
++ Add generic `tag` log option to allow customizing container/image information passed to driver (e.g. show container names) (#15384)
+- Implement the `docker logs` endpoint for the journald driver (#13707)
+- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384)
+
+### Distribution
+
++ `docker search` now works with partial names (#16509)
+- Push optimization: avoid buffering to file (#15493)
+- The daemon will display progress for images that were already being pulled by another client (#15489)
+- Only permissions required for the current action being performed are requested (#)
++ Renaming trust keys (and respective environment variables) from `offline` to `root` and `tagging` to `repository` (#16894)
+- DEPRECATE trust key environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894)
+
+### Security
+
++ Add SELinux profiles to the rpm package (#15832)
+- Fix various issues with AppArmor profiles provided in the deb package (#14609)
+- Add AppArmor policy that prevents writing to /proc (#15571)
+
+## 1.8.3 (2015-10-12)
+
+### Distribution
+
+- Fix layer IDs leading to local graph poisoning (CVE-2014-8178)
+- Fix manifest validation and parsing logic errors allowing pull-by-digest validation bypass (CVE-2014-8179)
++ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry
+
+## 1.8.2 (2015-09-10)
+
+### Distribution
+
+- Fix rare edge case of handling GNU LongLink and LongName entries.
+- Fix ^C on docker pull.
+- Fix docker pull issues on client disconnection.
+- Fix issue that caused the daemon to panic when loggers weren't configured properly.
+- Fix goroutine leak pulling images from registry V2.
+
+### Runtime
+
+- Fix a bug mounting cgroups for docker daemons running inside docker containers.
+- Initialize log configuration properly.
+
+### Client
+
+- Handle `-q` flag in `docker ps` properly when there is a default format.
+
+### Networking
+
+- Fix several corner cases with netlink.
+
+### Contrib
+
+- Fix several issues with bash completion.
+
+## 1.8.1 (2015-08-12)
+
+### Distribution
+
+* Fix a bug where pushing multiple tags would result in invalid images
+
+## 1.8.0 (2015-08-11)
+
+### Distribution
+
++ Trusted pull, push and build, disabled by default
+* Make tar layers deterministic between registries
+* Don't allow deleting the image of running containers
+* Check if a tag name to load is a valid digest
+* Allow one-character repository names
+* Add a more accurate error description for invalid tag names
+* Make build cache ignore mtime
+
+### Cli
+
++ Add support for DOCKER_CONFIG/--config to specify the config file directory
++ Add --type flag for docker inspect command
++ Add formatting options to `docker ps` with `--format`
++ Replace `docker -d` with new subcommand `docker daemon`
+* Zsh completion updates and improvements
+* Add some missing events to bash completion
+* Support daemon URLs with base paths in `docker -H`
+* Validate status= filter to docker ps
+* Display when a container is in --net=host in docker ps
+* Extend docker inspect to export image metadata related to graph driver
+* Restore --default-gateway{,-v6} daemon options
+* Add missing unpublished ports in docker ps
+* Allow duration strings in `docker events` as --since/--until
+* Expose more mount information in `docker inspect`
+
+### Runtime
+
++ Add new Fluentd logging driver
++ Allow `docker import` to load from local files
++ Add logging driver for GELF via UDP
++ Allow copying files from the host to containers with `docker cp`
++ Promote volume drivers from experimental to master
++ Add rollover options to json-file log driver, and --log-driver-opts flag
++ Add memory swappiness tuning options
+* Remove cgroup read-only flag when privileged
+* Make /proc, /sys, & /dev read-only for read-only containers
+* Add cgroup bind mount by default
+* Overlay: Export metadata for container and image in `docker inspect`
+* Devicemapper: external device activation
+* Devicemapper: Compare uuid of base device on startup
+* Remove RC4 from the list of registry cipher suites
+* Add syslog-facility option
+* LXC execdriver compatibility with recent LXC versions
+* Mark LXC execdriver as deprecated (to be removed with the migration to runc)
+
+### Plugins
+
+* Separate plugin sockets and specs locations
+* Allow TLS connections to plugins
+
+### Bug fixes
+
+- Add missing 'Names' field to /containers/json API output
+- Make `docker rmi` of dangling images safe while pulling
+- Devicemapper: Change default basesize to 100G
+- Fix Go scheduler issue with sync.Mutex and gcc
+- Fix issue where Search API endpoint would panic due to empty AuthConfig
+- Set image canonical names correctly
+- Check dockerinit only if lxc driver is used
+- Fix ulimit usage of nproc
+- Always attach STDIN if -i,--interactive is specified
+- Show error messages when saving container state fails
+- Fixed incorrect assumption that --bridge=none disables networking
+- Check for invalid port specifications in host configuration
+- Fix endpoint leave failure for --net=host mode
+- Fix goroutine leak in the stats API if the container is not running
+- Check for apparmor file before reading it
+- Fix DOCKER_TLS_VERIFY being ignored
+- Set umask to the default on startup
+- Correct the message when pausing or unpausing a non-running container
+- Adjust disallowed CpuShares in container creation
+- ZFS: correctly apply selinux context
+- Display empty string instead of when IP opt is nil
+- `docker kill` returns an error when the container is not running
+- Fix COPY/ADD quoted/json form
+- Fix goroutine leak on logs -f with no output
+- Remove panic in nat package on invalid hostport
+- Fix container linking in Fedora 22
+- Fix error caused by using default gateways outside of the allocated range
+- Format times in inspect command with a template as RFC3339Nano
+- Make the registry client accept 2xx and 3xx HTTP status responses as successful
+- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order.
+- Fix error when the docker ps format was not valid.
+- Remove redundant IP forward check.
+- Fix issue trying to push images to repository mirrors.
+- Fix error cleaning up network endpoints when there is an initialization issue.
+
+## 1.7.1 (2015-07-14)
+
+#### Runtime
+
+- Fix default user spawning exec process with `docker exec`
+- Make `--bridge=none` not configure the network bridge
+- Publish networking stats properly
+- Fix implicit devicemapper selection with static binaries
+- Fix socket connections that hung intermittently
+- Fix bridge interface creation on CentOS/RHEL 6.6
+- Fix local DNS lookups added to resolv.conf
+- Fix copy command mounting volumes
+- Fix read/write privileges in volumes mounted with --volumes-from
+
+#### Remote API
+
+- Fix unmarshalling of Command and Entrypoint
+- Set limit for minimum client version supported
+- Validate port specification
+- Return proper errors when attach/reattach fail
+
+#### Distribution
+
+- Fix pulling private images
+- Fix fallback between registry V2 and V1
+
+## 1.7.0 (2015-06-16)
+
+#### Runtime
++ Experimental feature: support for out-of-process volume plugins
+* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag
+* The `exec` command supports the `-u|--user` flag to specify the new process owner
++ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags
++ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota`
++ Container block IO can be controlled in `docker run` using `--blkio-weight`
++ ZFS support
++ The `docker logs` command supports a `--since` argument
++ UTS namespace can be shared with the host with `docker run --uts=host`
+
+#### Quality
+* Networking stack was entirely rewritten as part of the libnetwork effort
+* Engine internals refactoring
+* Volumes code was entirely rewritten to support the plugins effort
++ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting
+
+#### Build
++ Support ${variable:-value} and ${variable:+value} syntax for environment variables
++ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems`
++ git context changes with branches and directories
+* The .dockerignore file supports exclusion rules
+
+#### Distribution
++ Client support for v2 mirroring for the official registry
+
+#### Bugfixes
+* Firewalld is now supported and will automatically be used when available
+* Mounting --device recursively
+
+## 1.6.2 (2015-05-13)
+
+#### Runtime
+- Revert change prohibiting mounting into /sys
+
+## 1.6.1 (2015-05-07)
+
+#### Security
+- Fix read/write /proc paths (CVE-2015-3630)
+- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
+- Fix opening of file-descriptor 1 (CVE-2015-3627)
+- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
+- Prohibit mount of /sys
+
+#### Runtime
+- Update AppArmor policy to not allow mounts
+
+## 1.6.0 (2015-04-07)
+
+#### Builder
+
+## 1.6.2 (2015-05-13)
+
+#### Runtime
+- Revert change prohibiting mounting into /sys
+
+## 1.6.1 (2015-05-07)
+
+#### Security
+- Fix read/write /proc paths (CVE-2015-3630)
+- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631)
+- Fix opening of file-descriptor 1 (CVE-2015-3627)
+- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629)
+- Prohibit mount of /sys
+
+#### Runtime
+- Update AppArmor policy to not allow mounts
+
+## 1.6.0 (2015-04-07)
+
+#### Builder
++ Building images from an image ID
++ Build containers with resource constraints, i.e. `docker build --cpu-shares=100 --memory=1024m...`
++ `commit --change` to apply specified Dockerfile instructions while committing the image
++ `import --change` to apply specified Dockerfile instructions while importing the image
++ Builds no longer continue in the background when canceled with CTRL-C
+
+#### Client
++ Windows Support
+
+#### Runtime
++ Container and image Labels
++ `--cgroup-parent` for specifying a parent cgroup to place the container cgroup within
++ Logging drivers, `json-file`, `syslog`, or `none`
++ Pulling images by ID
++ `--ulimit` to set the ulimit on a container
++ `--default-ulimit` option on the daemon which applies to all created containers (overridden by `--ulimit` on run)
+
+## 1.5.0 (2015-02-10)
+
+#### Builder
++ The Dockerfile to use for a given `docker build` can be specified with the `-f` flag
+* Dockerfile and .dockerignore files can themselves be excluded as part of the .dockerignore file, thus preventing modifications to these files from invalidating the ADD or COPY instruction cache
+* ADD and COPY instructions accept relative paths
+* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
+* Improve performance when exposing a large number of ports
+
+#### Hack
++ Allow client-side only integration tests for Windows
+* Include docker-py integration tests against Docker daemon as part of our test suites
+
+#### Packaging
++ Support for the new version of the registry HTTP API
+* Speed up `docker push` for images with a majority of already existing layers
+- Fixed contacting a private registry through a proxy
+
+#### Remote API
++ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
++ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
+* Container `inspect` endpoint shows the ID of `exec` commands running in this container
+* Container `inspect` endpoint shows the number of times Docker auto-restarted the container
+* New types of event can be streamed by the `events` endpoint: 'OOM' (container died with out of memory), 'exec_create', and 'exec_start'
+- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes
+
+#### Runtime
++ Docker daemon has full IPv6 support
++ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible for example to debug host processes using containerized debugging tools
++ The `docker run` command can take the `--read-only` flag to make the container's root filesystem mounted as readonly, which can be used in combination with volumes to force a container's processes to only write to locations that will be persisted
++ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
+* Major stability improvements for devicemapper storage driver
+* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
+* Better integration with host system: per-container iptable rules are moved to the DOCKER chain
+- Fixed container exit on out of memory returning an invalid exit code
+
+#### Other
+* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon
+
+## 1.4.1 (2014-12-15)
+
+#### Runtime
+- Fix issue with volumes-from and bind mounts not being honored after create
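+
+A hedged sketch of the 1.5.0 builder and stats additions above (the file, tag, and container names are placeholders, not from the changelog):
+
+```console
+$ # Build from an alternate Dockerfile (-f, new in 1.5.0)
+$ docker build -f dockerfiles/Dockerfile.release -t myapp .
+$ # Stream live resource metrics via the new stats endpoint
+$ docker stats mycontainer
+```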
+
+## 1.4.0 (2014-12-11)
+
+#### Notable Features since 1.3.0
++ Set key=value labels to the daemon (displayed in `docker info`), applied with
+  new `-label` daemon flag
++ Add support for `ENV` in Dockerfile of the form:
+  `ENV name=value name2=value2...`
++ New Overlayfs Storage Driver
++ `docker info` now returns an `ID` and `Name` field
++ Filter events by event name, container, or image
++ `docker cp` now supports copying from container volumes
+- Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing
+  image.
+
+## 1.3.3 (2014-12-11)
+
+#### Security
+- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
+- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
+- Validate image IDs (CVE-2014-9358)
+
+#### Runtime
+- Fix an issue when image archives are being read slowly
+
+#### Client
+- Fix a regression related to stdin redirection
+- Fix a regression with `docker cp` when destination is the current directory
+
+## 1.3.2 (2014-11-20)
+
+#### Security
+- Fix tar breakout vulnerability
+* Extractions are now sandboxed in a chroot
+- Security options are no longer committed to images
+
+#### Runtime
+- Fix deadlock in `docker ps -f exited=1`
+- Fix a bug when `--volumes-from` references a container that failed to start
+
+#### Registry
++ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
+* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
+- Skip the experimental registry v2 API when mirroring is enabled
+
+## 1.3.1 (2014-10-28)
+
+#### Security
+* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
++ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified
+
+#### Runtime
+- Fix issue where volumes would not be shared
+
+#### Client
+- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false`
+- Fix docker run output to non-TTY stdout
+
+#### Builder
+- Fix escaping `$` for environment variables
+- Fix issue with lowercase `onbuild` Dockerfile instruction
+- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`
+
+## 1.3.0 (2014-10-14)
+
+#### Notable features since 1.2.0
++ Docker `exec` allows you to run additional processes inside existing containers
++ Docker `create` gives you the ability to create a container via the CLI without executing a process
++ `--security-opts` options to allow users to customize container labels and apparmor profiles
++ Docker `ps` filters
+- Wildcard support to COPY/ADD
++ Move production URLs to get.docker.com from get.docker.io
++ Allocate IP address on the bridge inside a valid CIDR
++ Use drone.io for PR and CI testing
++ Ability to setup an official registry mirror
++ Ability to save multiple images with docker `save`
+
+## 1.2.0 (2014-08-20)
+
+#### Runtime
++ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime
++ Auto-restart containers using policies
++ Use /var/lib/docker/tmp for large temporary files
++ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want
++ `--device` to use devices in containers
+
+#### Client
++ `docker search` on private registries
++ Add `exited` filter to `docker ps --filter`
+* `docker rm -f` now kills instead of stopping
++ Support for IPv6 addresses in `--dns` flag
+
+#### Proxy
++ Proxy instances in separate processes
+* Small bug fix on UDP proxy
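+
+A minimal, hedged sketch of the 1.3.0 `create`/`exec` workflow noted above (image and commands are assumptions for the example):
+
+```console
+$ # Create a container without starting its process (new in 1.3.0)
+$ CID=$(docker create busybox sleep 3600)
+$ docker start "$CID"
+$ # Run an additional process inside the running container
+$ docker exec -i -t "$CID" sh
+```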
+
+## 1.1.2 (2014-07-23)
+
+#### Runtime
++ Fix port allocation for existing containers
++ Fix containers restart on daemon restart
+
+#### Packaging
++ Fix /etc/init.d/docker issue on Debian
+
+## 1.1.1 (2014-07-09)
+
+#### Builder
+* Fix issue with ADD
+
+## 1.1.0 (2014-07-03)
+
+#### Notable features since 1.0.1
++ Add `.dockerignore` support
++ Pause containers during `docker commit`
++ Add `--tail` to `docker logs`
+
+#### Builder
++ Allow a tar file as context for `docker build`
+* Fix issue with whitespace and multi-line instructions in `Dockerfiles`
+
+#### Runtime
+* Overall performance improvements
+* Allow `/` as source of `docker run -v`
+* Fix port allocation
+* Fix bug in `docker save`
+* Add links information to `docker inspect`
+
+#### Client
+* Improve command line parsing for `docker commit`
+
+#### Remote API
+* Improve status code for the `start` and `stop` endpoints
+
+## 1.0.1 (2014-06-19)
+
+#### Notable features since 1.0.0
+* Enhance security for the LXC driver
+
+#### Builder
+* Fix `ONBUILD` instruction passed to grandchildren
+
+#### Runtime
+* Fix events subscription
+* Fix /etc/hostname file with host networking
+* Allow `-h` and `--net=none`
+* Fix issue with hotplug devices in `--privileged`
+
+#### Client
+* Fix artifacts with events
+* Fix a panic with empty flags
+* Fix `docker cp` on Mac OS X
+
+#### Miscellaneous
+* Fix compilation on Mac OS X
+* Fix several races
+
+## 1.0.0 (2014-06-09)
+
+#### Notable features since 0.12.0
+* Production support
+
+## 0.12.0 (2014-06-05)
+
+#### Notable features since 0.11.0
+* 40+ various improvements to stability, performance and usability
+* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file
+* Inherit file permissions from the host on `ADD`
+* New `pause` and `unpause` commands to allow pausing and unpausing of containers using cgroup freezer
+* The `images` command has a `-f`/`--filter` option to filter the list of images
+* Add `--force-rm` to clean up after a failed build
+* Standardize JSON keys in Remote API to CamelCase
+* A pull triggered by `docker run` now assumes the `latest` tag if not specified
+* Enhance security on Linux capabilities and device nodes
+
+## 0.11.1 (2014-05-07)
+
+#### Registry
+- Fix push and pull to private registry
+
+## 0.11.0 (2014-05-07)
+
+#### Notable features since 0.10.0
+
+* SELinux support for mount and process labels
+* Linked containers can be accessed by hostname
+* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces
+* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
+* Logs can now be returned with an optional timestamp
+* Docker now works with registries that support SHA-512
+* Multiple registry endpoints are supported to allow registry mirrors
+
+## 0.10.0 (2014-04-08)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+- Follow symlinks inside container's root for ADD build instructions.
+- Fix EXPOSE caching.
+
+#### Documentation
+- Add the new options of `docker ps` to the documentation.
+- Add the options of `docker restart` to the documentation.
+- Update daemon docs and help messages for --iptables and --ip-forward.
+- Updated apt-cacher-ng docs example.
+- Remove duplicate description of --mtu from docs.
+- Add missing -t and -v for `docker images` to the docs.
+- Add fixes to the cli docs.
+- Update libcontainer docs.
+- Update images in docs to remove references to AUFS and LXC.
+- Update the nodejs_web_app in the docs to use the new epel RPM address.
+- Fix external link on security of containers.
+- Update remote API docs.
+- Add image size to history docs.
+- Be explicit about binding to all interfaces in redis example.
+- Document DisableNetwork flag in the 1.10 remote api.
+- Document that `--lxc-conf` is lxc only.
+- Add chef usage documentation.
+- Add example for an image with multiple tags for `docker load`.
+- Explain what `docker run -a` does in the docs.
+
+#### Contrib
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more.
+- Add check-config script to contrib.
+- Fix fish shell completion.
+
+#### Hack
+* Clean up "go test" output from "make test" to be much more readable/scannable.
+* Exclude more "definitely not unit tested Go source code" directories from hack/make/test.
++ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+- Include contributed completions in Ubuntu PPA.
++ Add cli integration tests.
+* Add tweaks to the hack scripts to make them simpler.
+
+#### Remote API
++ Add TLS auth support for API.
+* Move git clone from daemon to client.
+- Fix content-type detection in docker cp.
+* Split API into 2 go packages.
+
+#### Runtime
+* Support hairpin NAT without going through Docker server.
+- devicemapper: succeed immediately when removing non-existent devices.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping).
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- devicemapper: ensure we shut down thin pool cleanly.
+- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice.
+- devicemapper: avoid AB-BA deadlock.
+- devicemapper: make shutdown better/faster.
+- Improve alpha sorting in mflag.
+- Remove manual http cookie management because the cookiejar is being used.
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Add FreeBSD support for the client.
+- Merge auth package into registry.
+- Add deprecation warning for -t on `docker pull`.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+- Fix attach exit on darwin.
+- Improve deprecation message.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Only unshare the mount namespace for execin.
+- Merge existing config when committing.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Mount cgroups automatically if they're not mounted already.
+- Use mock for search tests.
+- Update to double-dash everywhere.
+- Move .dockerenv parsing to lxc driver.
+- Move all bind-mounts in the container inside the namespace.
+- Don't use separate bind mount for container.
+- Always symlink /dev/ptmx for libcontainer.
+- Don't kill by pid for other drivers.
+- Add initial logging to libcontainer.
+* Sort by port in `docker ps`.
+- Move networking drivers into runtime top level package.
++ Add --no-prune to `docker rmi`.
++ Add time since exit in `docker ps`.
+- graphdriver: add build tags.
+- Prevent allocation of previously allocated ports & improve port allocation.
+* Add support for --since/--before in `docker ps`.
+- Clean up container stop.
++ Add support for configurable dns search domains.
+- Add support for relative WORKDIR instructions.
+- Add --output flag for docker save.
+- Remove duplication of DNS entries in config merging.
+- Add cpuset.cpus to cgroups and native driver options.
+- Remove docker-ci.
+- Promote btrfs. btrfs is no longer considered experimental.
+- Add --input flag to `docker load`.
+- Return error when existing bridge doesn't match IP address.
+- Strip comments before parsing line continuations to avoid interpreting instructions as comments.
+- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces.
+- Add systemd implementation of cgroups and make containers show up as systemd units.
+- Fix commit and import when no repository is specified.
+- Remount /var/lib/docker as --private to fix scaling issue.
+- Use the environment's proxy when pinging the remote registry.
+- Reduce error level from harmless errors.
+* Allow --volumes-from to be individual files.
+- Fix expanding buffer in StdCopy.
+- Set error regardless of attach or stdin. This fixes #3364.
+- Add support for --env-file to load environment variables from files.
+- Symlink /etc/mtab and /proc/mounts.
+- Allow pushing a single tag.
+- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM.
+- Don't throw error when starting an already running container.
+- Fix dynamic port allocation limit.
+- Remove setupDev from libcontainer.
+- Add API version to `docker version`.
+- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup.
+- Fix --volumes-from mount failure.
+- Allow non-privileged containers to create device nodes.
+- Skip login tests because of external dependency on a hosted service.
+- Deprecate `docker images --tree` and `docker images --viz`.
+- Deprecate `docker insert`.
+- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04.
+- Add specific error message when hitting 401 over HTTP on push.
+- Fix absolute volume check.
+- Remove volumes-from from the config.
+- Move DNS options to hostconfig.
+- Update the apparmor profile for libcontainer.
+- Add deprecation notice for `docker commit -run`.
+
+## 0.9.1 (2014-03-24)
+
+#### Builder
+- Fix printing multiple messages on a single line. Fixes broken output during builds.
+
+#### Documentation
+- Fix external link on security of containers.
+
+#### Contrib
+- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly.
+- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile.
+
+#### Hack
+- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh.
+
+#### Remote API
+- Fix content-type detection in `docker cp`.
+
+#### Runtime
+- Use BSD raw mode on Darwin. Fixes nano, tmux and others.
+- Only unshare the mount namespace for execin.
+- Retry to retrieve the layer metadata up to 5 times for `docker pull`.
+- Merge existing config when committing.
+- Fix panic in monitor.
+- Disable daemon startup timeout.
+- Fix issue #4681: add loopback interface when networking is disabled.
+- Add failing test case for issue #4681.
+- Send SIGTERM to child, instead of SIGKILL.
+- Show the driver and the kernel version in `docker info` even when not in debug mode.
+- Always symlink /dev/ptmx for libcontainer. This fixes console related problems.
+- Fix issue caused by the absence of /etc/apparmor.d.
+- Don't leave empty cidFile behind when failing to create the container.
+- Improve deprecation message.
+- Fix attach exit on darwin.
+- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping).
+- devicemapper: succeed immediately when removing non-existent devices.
+- devicemapper: increase timeout in waitClose to 10 seconds.
+- Remove goroutine leak on error.
+- Update parseLxcInfo to comply with new lxc1.0 format.
+
+## 0.9.0 (2014-03-10)
+
+#### Builder
+- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build.
+- Add error to docker build --rm. This adds missing error handling.
+- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers.
+- Make `--rm` the default for `docker build`.
+
+#### Documentation
+- Download the docker client binary for Mac over https.
+- Update the titles of the install instructions & descriptions.
+* Add instructions for upgrading boot2docker.
+* Add port forwarding example in OS X install docs.
+- Attempt to disentangle repository and registry.
+- Update docs to explain more about `docker ps`.
+- Update sshd example to use a Dockerfile.
+- Rework some examples, including the Python examples.
+- Update docs to include instructions for a container's lifecycle.
+- Update the documentation to discuss the docs branch.
+- Don't skip cert check for an example & use HTTPS.
+- Bring back the memory and swap accounting section which was lost when the kernel page was removed.
+- Explain DNS warnings and how to fix them on systems running and using a local nameserver.
+
+#### Contrib
+- Add Tanglu support for mkimage-debootstrap.
+- Add SteamOS support for mkimage-debootstrap.
+
+#### Hack
+- Get package coverage when running integration tests.
+- Remove the Vagrantfile. This is being replaced with boot2docker.
+- Fix tests on systems where aufs isn't available.
+- Update packaging instructions and remove the dependency on lxc.
+
+#### Remote API
+* Move code specific to the API to the api package.
+- Fix header content type for the API. Makes all endpoints use proper content type.
+- Fix registry auth & remove ping calls from CmdPush and CmdPull.
+- Add newlines to the JSON stream functions.
+
+#### Runtime
+* Do not ping the registry from the CLI. All requests to registries flow through the daemon.
+- Check for nil information return in the lxc driver. This fixes panics with older lxc versions.
+- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently.
+- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device.
+* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks.
+- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal.
+- Ensure docker cp stream is closed properly. Fixes problems with files not being copied by `docker cp`.
+- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp.
+- Fix `--run` in `docker commit`. This makes `docker commit --run` work again.
+- Fix custom bridge related options. This makes custom bridges work again.
++ Mount-bind the PTY as container console. This allows tmux/screen to run.
++ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel.
++ Add native exec driver which uses libcontainer and make it the default exec driver.
+- Add support for handling extended attributes in archives.
+* Set the container MTU to be the same as the host MTU.
++ Add simple sha256 checksums for layers to speed up `docker push`.
+* Improve kernel version parsing.
+* Allow flag grouping (`docker run -it`).
+- Remove chroot exec driver.
+- Fix divide by zero to fix panic.
+- Rewrite `docker rmi`.
+- Fix docker info with lxc 1.0.0.
+- Fix Fedora tty with apparmor.
+* Don't always append env vars, replace defaults with vars from config.
+* Fix a goroutine leak.
+* Switch to Go 1.2.1.
+- Fix unique constraint error checks.
+* Handle symlinks for Docker's data directory and for TMPDIR.
+- Add deprecation warnings for flags (-flag is deprecated in favor of --flag)
+- Add apparmor profile for the native execution driver.
+* Move system specific code from archive to pkg/system.
+- Fix duplicate signal for `docker run -i -t` (issue #3336).
+- Return correct process pid for lxc.
+- Add a -G option to specify the group which unix sockets belong to.
++ Add `-f` flag to `docker rm` to force removal of running containers.
++ Kill ghost containers and restart all ghost containers when the docker daemon restarts.
++ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk.
+
+## 0.8.1 (2014-02-18)
+
+#### Builder
+
+- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
+- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system
+- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported
+
+#### Documentation
+
+* Update issue filing instructions
+* Warn against the use of symlinks for Docker's storage folder
+* Replace the Firefox example with an IceWeasel example
+* Rewrite the PostgreSQL example using a Dockerfile and add more details to it
+* Improve the OS X documentation
+
+#### Remote API
+
+- Fix broken images API for version less than 1.7
+- Use the right encoding for all API endpoints which return JSON
+- Move remote api client to api/
+- Queue calls to the API using generic socket wait
+
+#### Runtime
+
+- Fix the use of custom settings for bridges and custom bridges
+- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures
+- Remove two panics which could make Docker crash in some situations
+- Don't ping registry from the CLI client
+- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks
+- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration
+- Remove directory when removing devicemapper device. This cleans up leftover mount directories
+- Drop NET_ADMIN capability for non-privileged containers. Unprivileged containers can't change their network configuration
+- Ensure `docker cp` stream is closed properly
+- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
+- Stop allowing tcp:// as a default tcp bin address which binds to 127.0.0.1:4243 and remove the default port
++ Mount-bind the PTY as container console. This allows tmux and screen to run in a container
+- Clean up archive closing. This fixes and improves archive handling
+- Fix engine tests on systems where temp directories are symlinked
+- Add test methods for save and load
+- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart
+- Support submodules when building from a GitHub repository
+- Quote volume path to allow spaces
+- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs
+
+## 0.8.0 (2014-02-04)
+
+#### Notable features since 0.7.0
+
+* Images and containers can be removed much faster
+* Building an image from source with docker build is now much faster
+* The Docker daemon starts and stops much faster
+* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
+* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations
+* All packaging operations are now built on the Go language's standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
+* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example the storage driver API now allows Docker to do reference counting on mounts created by the drivers
+* With the ongoing changes to the networking and execution subsystems of docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages
+* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change
+
+* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn't changed
+* The new ONBUILD instruction adds to your image a "trigger" instruction to be executed at a later time, when the image is used as the base for another build
+* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
+* Docker is officially supported on Mac OS X
+* The Docker daemon supports systemd socket activation
+
+## 0.7.6 (2014-01-14)
+
+#### Builder
+
+* Do not follow symlink outside of build context
+
+#### Runtime
+
+- Remount bind mounts when ro is specified
+* Use https for fetching docker version
+
+#### Other
+
+* Inline the test.docker.io fingerprint
+* Add ca-certificates to packaging documentation
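+
+A hedged sketch of the 0.8.0 ONBUILD trigger described above (image names are placeholders, not from the changelog):
+
+```console
+$ # ONBUILD defers an instruction until this image is used as a base
+$ cat > Dockerfile <<'EOF'
+FROM ubuntu
+ONBUILD ADD . /app/src
+EOF
+$ docker build -t mybase .
+$ # A later build whose Dockerfile starts with FROM mybase runs the deferred ADD automatically
+```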
+
+## 0.7.5 (2014-01-09)
+
+#### Builder
+
+* Disable compression for build. More space usage but a much faster upload
+- Fix ADD caching for certain paths
+- Do not compress archive from git build
+
+#### Documentation
+
+- Fix error in GROUP add example
+* Make sure the GPG fingerprint is inline in the documentation
+* Give more specific advice on setting up signing of commits for DCO
+
+#### Runtime
+
+- Fix misspelled container names
+- Do not add hostname when networking is disabled
+* Return most recent image from the cache by date
+- Return all errors from docker wait
+* Add Content-Type Header "application/json" to GET /version and /info responses
+
+#### Other
+
+* Update DCO to version 1.1
++ Update Makefile to use "docker:GIT_BRANCH" as the generated image name
+* Update Travis to check for new 1.1 DCO version
+
+## 0.7.4 (2014-01-07)
+
+#### Builder
+
+- Fix ADD caching issue with . prefixed path
+- Fix docker build on devicemapper by reverting sparse file tar option
+- Fix issue with file caching and prevent wrong cache hit
+* Use same error handling while unmarshalling CMD and ENTRYPOINT
+
+#### Documentation
+
+* Simplify and streamline Amazon Quickstart
+* Install instructions use unprefixed Fedora image
+* Update instructions for mtu flag for Docker on GCE
++ Add Ubuntu Saucy to installation
+- Fix for wrong version warning on master instead of latest
+
+#### Runtime
+
+- Only get the image's rootfs when we need to calculate the image size
+- Correctly handle unmapping UDP ports
+* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build
+- Fix login message to say pull instead of push
+- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN
+* Make a blank -H option default to the same as when no -H was sent
+* Extract cgroups utilities into their own submodule
+
+#### Other
+
++ Add Travis CI configuration to validate DCO and gofmt requirements
++ Add Developer Certificate of Origin Text
+* Upgrade VBox Guest Additions
+* Check standalone header when pinging a registry server
+
+## 0.7.3 (2014-01-02)
+
+#### Builder
+
++ Update ADD to use the image cache, based on a hash of the added content
+* Add error message for empty Dockerfile
+
+#### Documentation
+
+- Fix outdated link to the "Introduction" on www.docker.io
++ Update the docs to get wider when the screen does
+- Add information about needing to install LXC when using raw binaries
+* Update Fedora documentation to disentangle the docker and docker.io conflict
+* Add a note about using the new `-mtu` flag in several GCE zones
++ Add FrugalWare installation instructions
++ Add a more complete example of `docker run`
+- Fix API documentation for creating and starting Privileged containers
+- Add missing "name" parameter documentation on "/containers/create"
+* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration
+- Update the 1.8 API documentation with some additions that were added to the docs for 1.7
+
+#### Hack
+
+- Add missing libdevmapper dependency to the packagers documentation
+* Update minimum Go requirement to a hard line at Go 1.2+
+* Many minor improvements to the Vagrantfile
++ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location)
++ Add coverprofile generation reporting
+- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually
+* Update Dockerfile to be more canonical and have less spurious warnings during build
+- Fix some miscellaneous `docker pull` progress bar display issues
+* Migrate more miscellaneous packages under the "pkg" folder
+* Update TextMate highlighting to automatically be enabled for files named "Dockerfile"
+* Reorganize syntax highlighting files under a common "contrib/syntax" directory
+* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation
+* Add support for container names in bash completion
+
+#### Packaging
+
++ Add an official Docker client binary for Darwin (Mac OS X)
+* Remove empty "Vendor" string and added "License" on deb package
++ Add a stubbed version of "/etc/default/docker" in the deb package
+
+#### Runtime
+
+* Update layer application to extract tars in place, avoiding file churn while handling whiteouts
+- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision)
+* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`)
++ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions
+- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files
+* Update container name validation to include '.'
+- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected
+* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler
+* Update to use proper box-drawing characters everywhere in `docker images -tree`
+* Move MTU setting from LXC configuration to directly use netlink
+* Add `-S` option to external tar invocation for more efficient sparse file handling
++ Add arch/os info to User-Agent string, especially for registry requests
++ Add `-mtu` option to Docker daemon for configuring MTU
+- Fix `docker build` to exit with a non-zero exit code on error
++ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation
+
+## 0.7.2 (2013-12-16)
+
+#### Runtime
+
++ Validate container names on creation with standard regex
+* Increase maximum image depth to 127 from 42
+* Continue to move api endpoints to the job api
++ Add -bip flag to allow specification of dynamic bridge IP via CIDR
+- Allow bridge creation when ipv6 is not enabled on certain systems
+* Set hostname and IP address from within dockerinit
+* Drop capabilities from within dockerinit
+- Fix volumes on host when symlink is present in the image
+- Prevent deletion of image if ANY container is depending on it even if the container is not running
+* Update docker push to use new progress display
+* Use os.Lstat to allow mounting unix sockets when inspecting volumes
+- Adjust handling of inactive user login
+- Add missing defines in devicemapper for older kernels
+- Allow untag operations with no container validation
+- Add auth config to docker build
+
+#### Documentation
+
+* Add more information about Docker logging
++ Add RHEL documentation
+* Add a direct example for changing the CMD that is run in a container
+* Update Arch installation documentation
++ Add section on Trusted Builds
++ Add Network documentation page
+
+#### Other
+
++ Add new cover bundle for providing code coverage reporting
+* Separate integration tests in bundles
+* Make Tianon the hack maintainer
+* Update mkimage-debootstrap with more tweaks for keeping images small
+* Use https to get the install script
+* Remove vendored dotcloud/tar now that Go 1.2 has been released
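+
+A hedged illustration of the `DOCKER_HOST` variable added in 0.7.3 above (the address is an assumption; 4243 was the conventional TCP port of that era):
+
+```console
+$ # Point the client at a daemon without repeating -H on every invocation
+$ export DOCKER_HOST=tcp://127.0.0.1:4243
+$ docker ps
+```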
+
+## 0.7.1 (2013-12-05)
+
+#### Documentation
+
++ Add @SvenDowideit as documentation maintainer
++ Add links example
++ Add documentation regarding ambassador pattern
++ Add Google Cloud Platform docs
++ Add dockerfile best practices
+* Update doc for RHEL
+* Update doc for registry
+* Update Postgres examples
+* Update doc for Ubuntu install
+* Improve remote api doc
+
+#### Runtime
+
++ Add hostconfig to docker inspect
++ Implement `docker logs -f` to stream logs
++ Add env variable to disable kernel version warning
++ Add -format to `docker inspect`
++ Support bind-mount for files
+- Fix bridge creation on RHEL
+- Fix image size calculation
+- Make sure iptables are called even if the bridge already exists
+- Fix issue with stderr only attach
+- Remove init layer when destroying a container
+- Fix same port binding on different interfaces
+- `docker build` now returns the correct exit code
+- Fix `docker port` to display correct port
+- `docker build` now checks that the Dockerfile exists client-side
+- `docker attach` now returns the correct exit code
+- Remove the name entry when the container does not exist
+
+#### Registry
+
+* Improve progress bars, add ETA for downloads
+* Simultaneous pulls now wait for the first to finish instead of failing
+- Tag only the top-layer image when pushing to registry
+- Fix issue with offline image transfer
+- Fix issue preventing using ':' in password for registry
+
+#### Other
+
++ Add pprof handler for debug
++ Create a Makefile
+* Use stdlib tar that now includes fix
+* Improve make.sh test script
+* Handle SIGQUIT on the daemon
+* Disable verbose during tests
+* Upgrade to go1.2 for official build
+* Improve unit tests
+* The test suite now runs all tests even if one fails
+* Refactor C in Go (Devmapper)
+- Fix OS X compilation
+
+## 0.7.0 (2013-11-25)
+
+#### Notable features since 0.6.0
+
+* Storage drivers: choose from aufs, device-mapper, or vfs.
+* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
+* Links: compose complex software stacks by connecting containers to each other.
+* Container naming: organize your containers by giving them memorable names.
+* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
+* Offline transfer: push and pull images to the filesystem without losing information.
+* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
+
+## 0.6.7 (2013-11-21)
+
+#### Runtime
+
+* Improve stability, fixes some race conditions
+* Skip the volumes mounted when deleting the volumes of a container.
+* Fix layer size computation: handle hard links correctly
+* Use the work path for docker cp CONTAINER:PATH
+* Fix tmp dir never being cleaned up
+* Speed up docker ps
+* More informative error message on name collisions
+* Fix nameserver regex
+* Always return long IDs
+* Fix container restart race condition
+* Keep published ports on docker stop; docker start
+* Fix container networking on Fedora
+* Correctly express "any address" to iptables
+* Fix network setup when reconnecting to ghost container
+* Prevent deletion if image is used by a running container
+* Lock around read operations in graph
+
+#### RemoteAPI
+
+* Return full ID on docker rmi
+
+#### Client
+
++ Add -tree option to images
++ Offline image transfer
+* Exit with status 2 on usage error and display usage on stderr
+* Do not forward SIGCHLD to container
+* Use string timestamp for docker events -since
+
+#### Other
+
+* Update to go 1.2rc5
++ Add /etc/default/docker support to upstart
+
+## 0.6.6 (2013-11-06)
+
+#### Runtime
+
+* Ensure container name on register
+* Fix regression in /etc/hosts
++ Add lock around write operations in graph
+* Check if port is valid
+* Fix restart runtime error with ghost container networking
++ Add some more colors and animals to increase the pool of generated names
+* Fix issues in docker inspect
++ Escape apparmor confinement
++ Set environment variables using a file.
+* Prevent docker insert from erasing something
++ Prevent DNS server conflicts in CreateBridgeIface
++ Validate bind mounts on the server side
++ Use parent image config in docker build
+
+#### Client
+
++ Add -P flag to publish all exposed ports
++ Add -notrunc and -q flags to docker history
+* Fix docker commit, tag and import usage
++ Add stars, trusted builds and library flags in docker search
+* Fix docker logs with tty
+
+#### RemoteAPI
+
+* Make /events API send headers immediately
+* Do not split last column in docker top
++ Add size to history
+
+#### Other
+
++ Contrib: Desktop integration. Firefox usecase.
++ Dockerfile: bump to go1.2rc3
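+
+A hedged sketch of the 0.6.6 client flags above (single-dash flags were the convention at the time; the image name is a placeholder):
+
+```console
+$ # Publish all exposed ports (-P) and inspect image history quietly
+$ docker run -d -P myimage
+$ docker history -q -notrunc myimage
+```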
+
+## 0.6.5 (2013-10-29)
+
+#### Runtime
+
++ Containers can now be named
++ Containers can now be linked together for service discovery
++ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors
++ Automatically start crashed containers after a reboot
++ Expose IP, port, and proto as separate environment vars for container links
+* Allow ports to be published to specific ips
+* Prohibit inter-container communication by default
+- Ignore ErrClosedPipe for stdin in Container.Attach
+- Remove unused field kernelVersion
+* Fix issue when mounting subdirectories of /mnt in container
+- Fix untag during removal of images
+* Check return value of syscall.Chdir when changing working directory inside dockerinit
+
+#### Client
+
+- Only pass stdin to hijack when needed to avoid closed pipe errors
+* Use less reflection in command-line method invocation
+- Monitor the tty size after starting the container, not prior
+- Remove useless os.Exit() calls after log.Fatal
+
+#### Hack
+
++ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian
+* Add -p option to invoke debootstrap with http_proxy
+- Update install.sh with $sh_c to get sudo/su for modprobe
+* Update all the mkimage scripts to use --numeric-owner as a tar argument
+* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues
+
+#### Other
+
+* Documentation: Fix the flags for nc in example
+* Testing: Remove warnings and prevent mount issues
+- Testing: Change logic for tty resize to avoid warning in tests
+- Builder: Fix race condition in docker build with verbose output
+- Registry: Fix content-type for PushImageJSONIndex method
+* Contrib: Improve helper tools to generate Debian and Arch Linux server images
+
+## 0.6.4 (2013-10-16)
+
+#### Runtime
+
+- Add cleanup of container when Start() fails
+* Add better comments to utils/stdcopy.go
+* Add utils.Errorf for error logging
++ Add -rm to docker run for removing a container on exit
+- Remove error messages which are not actually errors
+- Fix `docker rm` with volumes
+- Fix some error cases where an HTTP body might not be closed
+- Fix panic with wrong dockercfg file
+- Fix the attach behavior with -i
+* Record termination time in state.
+- Use empty string so TempDir uses the OS's temp dir automatically
+- Make sure to close the network allocators
++ Autorestart containers by default
+* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)`
+* lxc: Allow set_file_cap capability in container
+- Move run -rm to the cli only
+* Split stdout and stderr
+* Always create a new session for the container
+
+#### Testing
+
+- Add aggregated docker-ci email report
+- Add cleanup to remove leftover containers
+* Add nightly release to docker-ci
+* Add more tests around auth.ResolveAuthConfig
+- Remove a few errors in tests
+- Catch errClosing error when TCP and UDP proxies are terminated
+* Only run certain tests with TESTFLAGS='-run TestName' make.sh
+* Prevent docker-ci from testing closing PRs
+* Replace panic with log.Fatal in tests
+- Increase TestRunDetach timeout
+
+#### Documentation
+
+* Add initial draft of the Docker infrastructure doc
+* Add devenvironment link to CONTRIBUTING.md
+* Add `apt-get install curl` to Ubuntu docs
+* Add explanation for export restrictions
+* Add .dockercfg doc
+* Remove Gentoo install notes about #1422 workaround
+* Fix help text for -v option
+* Fix Ping endpoint documentation
+- Fix parameter names in docs for ADD command
+- Fix ironic typo in changelog
+* Various command fixes in postgres example
+* Document how to edit and release docs
+- Minor updates to `postgresql_service.rst`
+* Clarify LGTM process to contributors
+- Corrected error in the package name
+* Document what `vagrant up` is actually doing
++ Improve doc search results
+* Cleanup whitespace in API 1.5 docs
+* Use angle brackets in MAINTAINER example email
+* Update archlinux.rst
++ Changes to a new style for the docs. Includes version switcher.
+* Formatting, add information about multiline json
+* Improve registry and index REST API documentation
+- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3
+* Update Gentoo installation documentation now that we're in the portage tree proper
+* Cleanup and reorganize docs and tooling for contributors and maintainers
+- Minor spelling correction of protocoll -> protocol
+
+#### Contrib
+
+* Add vim syntax highlighting for Dockerfiles from @honza
+* Add mkimage-arch.sh
+* Reorganize contributed completion scripts to add zsh completion
+
+#### Hack
+
+* Add vagrant user to the docker group
+* Add proper bash completion for "docker push"
+* Add xz utils as a runtime dep
+* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates
++ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link
+* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly
++ Add @tianon to hack/MAINTAINERS
+* Improve network performance for VirtualBox
+* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.)
+- Fix contrib/mkimage-debian.sh apt caching prevention
++ Add Dockerfile.tmLanguage to contrib
+* Configured FPM to make /etc/init/docker.conf a config file
+* Enable SSH Agent forwarding in Vagrant VM
+* Several small tweaks/fixes for contrib/mkimage-debian.sh
+
+#### Other
+
+- Builder: Abort build if mergeConfig returns an error and fix duplicate error message
+- Packaging: Remove deprecated packaging directory
+- Registry: Use correct auth config when logging in.
+- Registry: Fix the error message so it is the same as the regex
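+
+A hedged sketch of the 0.6.4 `-rm` flag above (single-dash era syntax; the image is a placeholder):
+
+```console
+$ # Remove the container automatically when it exits (-rm is client-side in 0.6.4)
+$ docker run -rm -i -t ubuntu /bin/bash
+```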
+
+## 0.6.3 (2013-09-23)
+
+#### Packaging
+
+* Add 'docker' group on install for ubuntu package
+* Update tar vendor dependency
+* Download apt key over HTTPS
+
+#### Runtime
+
+- Only copy and change permissions on non-bindmount volumes
+* Allow multiple volumes-from
+- Fix HTTP imports from STDIN
+
+#### Documentation
+
+* Update section on extracting the docker binary after build
+* Update development environment docs for new build process
+* Remove 'base' image from documentation
+
+#### Other
+
+- Client: Fix detach issue
+- Registry: Update regular expression to match index
+
+## 0.6.2 (2013-09-17)
+
+#### Runtime
+
++ Add domainname support
++ Implement image filtering with path.Match
+* Remove unnecessary warnings
+* Remove os/user dependency
+* Only mount the hostname file when the config exists
+* Handle signals within the `docker login` command
+- UID and GID are now also applied to volumes
+- `docker start` sets an error code upon error
+- `docker run` sets the same error code as the started process
+
+#### Builder
+
++ Add -rm option in order to remove intermediate containers
+* Allow multi-line RUN instructions
+
+#### Registry
+
+* Implement login with private registry
+- Fix push issues
+
+#### Other
+
++ Hack: Vendor all dependencies
+* Remote API: Bump to v1.5
+* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
+* Documentation: General improvements
+
+## 0.6.1 (2013-08-23)
+
+#### Registry
+
+* Pass "meta" headers in API calls to the registry
+
+#### Packaging
+
+- Use correct upstart script with new build tool
+- Use libffi-dev, don't build it from source
+- Remove duplicate mercurial install command
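+
+A hedged sketch of the 0.6.2 multi-line RUN support noted above (the base image and packages are invented for the example):
+
+```console
+$ cat > Dockerfile <<'EOF'
+FROM ubuntu
+# A backslash continuation lets one RUN instruction span several lines
+RUN apt-get update && \
+    apt-get install -y curl
+EOF
+$ docker build .
+```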
+
+## 0.6.0 (2013-08-22)
+
+#### Runtime
+
++ Add lxc-conf flag to allow custom lxc options
++ Add an option to set the working directory
+* Add Image name to LogEvent tests
++ Add -privileged flag and relevant tests, docs, and examples
+* Add websocket support to /container/<id>/attach/ws
+* Add warning when net.ipv4.ip_forwarding = 0
+* Add hostname to environment
+* Add last stable version in `docker version`
+- Fix race conditions in parallel pull
+- Fix Graph ByParent() to generate list of child images per parent image.
+- Fix typo: fmt.Sprint -> fmt.Sprintf
+- Fix small \n error in docker build
+* Fix to "Inject dockerinit at /.dockerinit"
+* Fix #910: print user name in docker info output
+* Use Go 1.1.2 for dockerbuilder
+* Use ranged for loop on channels
+- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
+- Improve CMD, ENTRYPOINT, and attach docs.
+- Improve connect message with socket error
+- Load authConfig only when needed and fix useless WARNING
+- Show tag used when image is missing
+* Apply volumes-from before creating volumes
+- Make docker run handle SIGINT/SIGTERM
+- Prevent crash when .dockercfg not readable
+- Install script should be fetched over https, not http.
+* API, issue 1471: Use groups for socket permissions
+- Correctly detect IPv4 forwarding
+* Mount /dev/shm as a tmpfs
+- Switch from http to https for get.docker.io
+* Let userland proxy handle container-bound traffic
+* Update the Docker CLI to specify a value for the "Host" header.
+- Change network range to avoid conflict with EC2 DNS
+- Reduce connect and read timeout when pinging the registry
+* Parallel pull
+- Handle ip route showing mask-less IP addresses
+* Allow ENTRYPOINT without CMD
+- Always consider localhost as a domain name when parsing the FQN repos name
+* Refactor checksum
+
+#### Documentation
+
+* Add MongoDB image example
+* Add instructions for creating and using the docker group
+* Add sudo to examples and installation to documentation
+* Add ufw doc
+* Add a reference to ps -a
+* Add information about Docker's high-level tools over LXC.
+* Fix typo in docs for docker run -dns
+* Fix a typo in the ubuntu installation guide
+* Fix to docs regarding adding docker groups
+* Update default -H docs
+* Update readme with dependencies for building
+* Update amazon.rst to explain that Vagrant is not necessary for running Docker on ec2
+* PostgreSQL service example in documentation
+* Suggest installing linux-headers by default.
+* Change the twitter handle
+* Clarify Amazon EC2 installation
+* 'Base' image is deprecated and should no longer be referenced in the docs.
+* Move note about officially supported kernel
+- Solved the logo being squished in Safari
+
+#### Builder
+
++ Add USER instruction to Dockerfile
++ Add workdir support for the Buildfile
+* Add no-cache option for docker build
+- Fix docker build and docker events output
+- Only count known instructions as build steps
+- Make sure ENV instruction within build performs a commit each time
+- Forbid certain paths within docker build ADD
+- Repository name (and optionally a tag) in build usage
+- Make sure ADD will create everything in 0755
+
+#### Remote API
+
+* Sort Images by most recent creation date.
+* Reworking opaque requests in registry module
+* Add image name in /events
+* Use mime pkg to parse Content-Type
+* 650 http utils and user agent field
+
+#### Hack
+
++ Bash Completion: Limit commands to containers of a relevant state
+* Add docker dependencies coverage testing into docker-ci
+
+#### Packaging
+
++ Docker-brew 0.5.2 support and memory footprint reduction
+* Add new docker dependencies into docker-ci
+- Revert "docker.upstart: avoid spawning a `sh` process"
++ Docker-brew and Docker standard library
++ Release docker with docker
+* Fix the upstart script generated by get.docker.io
+* Enabled the docs to generate manpages.
+* Revert Bind daemon to 0.0.0.0 in Vagrant.
+
+#### Registry
+
+* Improve auth push
+* Registry unit tests + mock registry
+
+#### Tests
+
+* Improve TestKillDifferentUser to prevent timeout on buildbot
+- Fix typo in TestBindMounts (runContainer called without image)
+* Improve TestGetContainersTop so it does not rely on sleep
+* Relax the lo interface test to allow iface index != 1
+* Add registry functional test to docker-ci
+* Add some tests in server and utils
+
+#### Other
+
+* Contrib: bash completion script
+* Client: Add docker cp command and copy api endpoint to copy container files/folders to the host
+* Don't read from stdout when only attached to stdin
+
+## 0.5.3 (2013-08-13)
+
+#### Runtime
+
+* Use docker group for socket permissions
+- Spawn shell within upstart script
+- Handle ip route showing mask-less IP addresses
+- Add hostname to environment
+
+#### Builder
+
+- Make sure ENV instruction within build performs a commit each time
+
+## 0.5.2 (2013-08-08)
+
+* Builder: Forbid certain paths within docker build ADD
+- Runtime: Change network range to avoid conflict with EC2 DNS
+* API: Change daemon to listen on unix socket by default
+
+## 0.5.1 (2013-07-30)
+
+#### Runtime
+
++ Add `ps` args to `docker top`
++ Add support for container ID files (pidfile like)
++ Add container=lxc in default env
++ Support networkless containers with `docker run -n` and `docker -d -b=none`
+* Stdout/stderr logs are now stored in the same file as JSON
+* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
+* Change .dockercfg format to json and support multiple auth remotes
+- Do not override volumes from config
+- Fix issue with EXPOSE override
+
+#### API
+
++ Docker client now sets useragent (RFC 2616)
++ Add /events endpoint
+
+#### Builder
+
++ ADD command now understands URLs
++ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
+- Create directories with 755 instead of 700 within ADD instruction
+
+#### Hack
+
+* Simplify unit tests with helpers
+* Improve docker.upstart event
+* Add coverage testing into docker-ci
+
+## 0.5.0 (2013-07-17)
+
+#### Runtime
+
++ List all processes running inside a container with 'docker top'
++ Host directories can be mounted as volumes with 'docker run -v'
++ Containers can expose public UDP ports (eg, '-p 123/udp')
++ Optionally specify an exact public port (eg. '-p 80:4500')
+* 'docker login' supports additional options
+- Don't save a container's hostname when committing an image.
+
+#### Registry
+
++ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
+- Fix issues when uploading images to a private registry
+
+#### Builder
+
++ ENTRYPOINT instruction sets a default binary entry point to a container
++ VOLUME instruction marks a part of the container as persistent data
+* 'docker build' displays the full output of a build by default
+
+## 0.4.8 (2013-07-01)
+
++ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
+- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
+- Tests: Fix issues in the test suite
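+
+A hedged sketch of the 0.5.0 volume and port options above (paths and the image name are placeholders):
+
+```console
+$ # Mount a host directory as a volume and expose a UDP port
+$ docker run -v /host/data:/data -p 123/udp myimage
+$ # Pin an exact public port instead of a random one
+$ docker run -p 80:4500 myimage
+```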
+
+## 0.4.7 (2013-06-28)
+
+#### Remote API
+
+* The progress bar updates faster when downloading and uploading large files
+- Fix a bug in the optional unix socket transport
+
+#### Runtime
+
+* Improve detection of kernel version
++ Host directories can be mounted as volumes with 'docker run -b'
+- Fix an issue when only attaching to stdin
+* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts
+
+#### Hack
+
+* Improve test suite and dev environment
+* Remove dependency of unit tests on 'os/user'
+
+#### Other
+
+* Registry: easier push/pull to a custom registry
++ Documentation: add terminology section
+
+## 0.4.6 (2013-06-22)
+
+- Runtime: fix a bug which caused creation of empty images (and volumes) to crash.
+
+## 0.4.5 (2013-06-21)
+
++ Builder: 'docker build git://URL' fetches and builds a remote git repository
+* Runtime: 'docker ps -s' optionally prints container size
+* Tests: improved and simplified
+- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail.
+- Builder: fix a regression when using ADD with a single regular file.
+
+## 0.4.4 (2013-06-19)
+
+- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients.
+
+## 0.4.3 (2013-06-19)
+
+#### Builder
+
++ ADD of a local file will detect tar archives and unpack them
+* ADD improvements: use tar for copy + automatically unpack local archives
+* ADD uses tar/untar for copies instead of calling 'cp -ar'
+* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented.
+- Fix a bug which caused builds to fail if ADD was the first command
+* Nicer output for 'docker build'
+
+#### Runtime
+
+* Remove bsdtar dependency
+* Add unix socket and multiple -H support
+* Prevent rm of running containers
+* Use go1.1 cookiejar
+- Fix issue detaching from running TTY container
+- Forbid parallel push/pull for a single image/repo. Fixes #311
+- Fix race condition within Run command when attaching.
+
+#### Client
+
+* HumanReadable ProgressBar sizes in pull
+* Fix docker version's git commit output
+
+#### API
+
+* Send all tags on History API call
+* Add tag lookup to history command. Fixes #882
+
+#### Documentation
+
+- Fix missing command in irc bouncer example
+
+## 0.4.2 (2013-06-17)
+
+- Packaging: Bumped version to work around an Ubuntu bug
+
+## 0.4.1 (2013-06-17)
+
+#### Remote API
+
++ Add flag to enable cross domain requests
++ Add images and containers sizes in docker ps and docker images
+
+#### Runtime
+
++ Configure dns configuration host-wide with 'docker -d -dns'
++ Detect faulty DNS configuration and replace it with a public default
++ Allow docker run <name>:<id>
++ You can now specify a public port (ex: -p 80:4500)
+* Improve image removal to garbage-collect unreferenced parents
+
+#### Client
+
+* Allow multiple params in inspect
+* Print the container id before the hijack in `docker run`
+
+#### Registry
+
+* Add regexp check on repo's name
+* Move auth to the client
+- Remove login check on pull
+
+#### Other
+
+* Vagrantfile: Add the rest api port to vagrantfile's port_forward
+* Upgrade to Go 1.1
+- Builder: don't ignore last line in Dockerfile when it doesn't end with \n
+
+## 0.4.0 (2013-06-03)
+
+#### Builder
+
++ Introducing Builder
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
+
+#### Remote API
+
++ Introducing Remote API
++ Control Docker programmatically using a simple HTTP/json API
+
+#### Runtime
+
+* Various reliability and usability improvements
+
+## 0.3.4 (2013-05-30)
+
+#### Builder
+
++ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
++ 'docker build -t FOO' applies the tag FOO to the newly built container.
+
+#### Runtime
+
++ Interactive TTYs correctly handle window resize
+* Fix how configuration is merged between layers
+
+#### Remote API
+
++ Split stdout and stderr on 'docker run'
++ Optionally listen on a different IP and port (use at your own risk)
+
+#### Documentation
+
+* Improve install instructions.
+
+## 0.3.3 (2013-05-23)
+
+- Registry: Fix push regression
+- Various bugfixes
+
+## 0.3.2 (2013-05-09)
+
+#### Registry
+
+* Improve the checksum process
+* Use the size to have a good progress bar while pushing
+* Use the actual archive if it exists in order to speed up the push
+- Fix error 400 on push
+
+#### Runtime
+
+* Store the actual archive on commit
+
+## 0.3.1 (2013-05-08)
+
+#### Builder
+
++ Implement the autorun capability within docker builder
++ Add caching to docker builder
++ Add support for docker builder with native API as top level command
++ Implement ENV within docker builder
+- Check the command existence prior to create, and add unit tests for the case
+* Use any whitespace instead of tabs
+
+#### Runtime
+
++ Add go version to debug infos
+* Kernel version - don't show the dash if flavor is empty
+
+#### Registry
+
++ Add docker search top level command in order to search a repository
+- Fix pull for official images with specific tag
+- Fix issue when logging in with a different user and trying to push
+* Improve checksum - async calculation
+
+#### Images
+
++ Output graph of images to dot (graphviz)
+- Fix ByParent function
+
+#### Documentation
+
++ New introduction and high-level overview
++ Add the documentation for docker builder
+- CSS fix for docker documentation to make REST API docs look better.
+- Fix CouchDB example page header mistake
+- Fix README formatting
+* Update www.docker.io website.
+
+#### Other
+
++ Website: new high-level overview
+- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
+* Packaging: Ubuntu packaging; issue #510: Use golang-stable PPA package to build docker
+
+## 0.3.0 (2013-05-06)
+
+#### Runtime
+
+- Fix the command existence check
+- strings.Split may return an empty string on no match
+- Fix an index out of range crash if cgroup memory is not
+
+#### Documentation
+
+* Various improvements
++ New example: sharing data between 2 couchdb databases
+
+#### Other
+
+* Vagrant: Use only one deb line in /etc/apt
++ Registry: Implement the new registry
+
+## 0.2.2 (2013-05-03)
+
++ Support for data volumes ('docker run -v=PATH')
++ Share data volumes between containers ('docker run -volumes-from')
++ Improve documentation
+* Upgrade to Go 1.0.3
+* Various upgrades to the dev environment for contributors
+
+## 0.2.1 (2013-05-01)
+
++ 'docker commit -run' bundles a layer with default runtime options: command, ports etc.
+* Improve install process on Vagrant
++ New Dockerfile operation: "maintainer"
++ New Dockerfile operation: "expose"
++ New Dockerfile operation: "cmd"
++ Contrib script to build a Debian base layer
++ 'docker -d -r': restart crashed containers at daemon startup
+* Runtime: improve test coverage
+
+## 0.2.0 (2013-04-23)
+
+- Runtime: ghost containers can be killed and waited for
+* Documentation: update install instructions
+- Packaging: fix Vagrantfile
+- Development: automate releasing binaries and ubuntu packages
++ Add a changelog
+- Various bugfixes
+
+## 0.1.8 (2013-04-22)
+
+- Dynamically detect cgroup capabilities
+- Issue stability warning on kernels <3.8
+- 'docker push' buffers on disk instead of memory
+- Fix 'docker diff' for removed files
+- Fix 'docker stop' for ghost containers
+- Fix handling of pidfile
+- Various bugfixes and stability improvements
+
+## 0.1.7 (2013-04-18)
+
+- Container ports are available on localhost
+- 'docker ps' shows allocated TCP ports
+- Contributors can run 'make hack' to start a continuous integration VM
+- Streamline ubuntu packaging & uploading
+- Various bugfixes and stability improvements
+
+## 0.1.6 (2013-04-17)
+
+- Record the author of an image with 'docker commit -author'
+
+## 0.1.5 (2013-04-17)
+
+- Disable standalone mode
+- Use a custom DNS resolver with 'docker -d -dns'
+- Detect ghost containers
+- Improve diagnosis of missing system capabilities
+- Allow disabling memory limits at compile time
+- Add debian packaging
+- Documentation: installing on Arch Linux
+- Documentation: running Redis on docker
+- Fix lxc 0.9 compatibility
+- Automatically load aufs module
+- Various bugfixes and stability improvements
+
+## 0.1.4 (2013-04-09)
+
+- Full support for TTY emulation
+- Detach from a TTY session with the escape sequence `C-p C-q`
+- Various bugfixes and stability improvements
+- Minor UI improvements
+- Automatically create our own bridge interface 'docker0'
+
+## 0.1.3 (2013-04-04)
+
+- Choose TCP frontend port with '-p :PORT'
+- Layer format is versioned
+- Major reliability improvements to the process manager
+- Various bugfixes and stability improvements
+
+## 0.1.2 (2013-04-03)
+
+- Set container hostname with 'docker run -h'
+- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
+- Various bugfixes and stability improvements
+- UI polish
+- Progress bar on push/pull
+- Use XZ compression by default
+- Make IP allocator lazy
+
+## 0.1.1 (2013-03-31)
+
+- Display shorthand IDs for convenience
+- Stabilize process management
+- Layers
can include a commit message +- Simplified 'docker attach' +- Fix support for re-attaching +- Various bugfixes and stability improvements +- Auto-download at run +- Auto-login on push +- Beefed up documentation + +## 0.1.0 (2013-03-23) + +Initial public release + +- Implement registry in order to push/pull images +- TCP port allocation +- Fix termcaps on Linux +- Add documentation +- Add Vagrant support with Vagrantfile +- Add unit tests +- Add repository/tags to ease image management +- Improve the layer implementation diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md new file mode 100644 index 0000000000..eb5f8ab0e9 --- /dev/null +++ b/vendor/github.com/docker/docker/CONTRIBUTING.md @@ -0,0 +1,401 @@ +# Contributing to Docker + +Want to hack on Docker? Awesome! We have a contributor's guide that explains +[setting up a Docker development environment and the contribution +process](https://docs.docker.com/opensource/project/who-written-for/). + +[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/) + +This page contains information about reporting issues as well as some tips and +guidelines useful to experienced open source contributors. Finally, make sure +you read our [community guidelines](#docker-community-guidelines) before you +start participating. + +## Topics + +* [Reporting Security Issues](#reporting-security-issues) +* [Design and Cleanup Proposals](#design-and-cleanup-proposals) +* [Reporting Issues](#reporting-other-issues) +* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines) +* [Community Guidelines](#docker-community-guidelines) + +## Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). + +Security reports are greatly appreciated and we will publicly thank you for it. +We also like to send gifts—if you're into Docker schwag, make sure to let +us know. We currently do not offer a paid security bounty program, but are not +ruling it out in the future. + + +## Reporting other issues + +A great way to contribute to the project is to send a detailed report when you +encounter an issue. We always appreciate a well-written, thorough bug report, +and will thank you for it! + +Check that [our issue database](https://github.com/docker/docker/issues) +doesn't already include that problem or suggestion before submitting an issue. +If you find a match, you can use the "subscribe" button to get notified on +updates. Do *not* leave random "+1" or "I have this too" comments, as they +only clutter the discussion, and don't help resolving it. However, if you +have ways to reproduce the issue or have additional information that may help +resolving the issue, please leave a comment. + +When reporting issues, always include: + +* The output of `docker version`. +* The output of `docker info`. + +Also include the steps required to reproduce the problem if possible and +applicable. This information will help us review and fix your issue faster. +When sending lengthy log-files, consider posting them as a gist (https://gist.github.com). +Don't forget to remove sensitive data from your logfiles before posting (you can +replace those parts with "REDACTED"). 
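+
+For example, the two outputs can be collected into a single file that is ready to
+paste into a gist (the file name and the redaction pattern below are illustrative):
+
+```bash
+# Capture the version and environment details maintainers always ask for.
+{ docker version; docker info; } > docker-report.txt 2>&1
+
+# Scrub anything sensitive, e.g. internal addresses, before posting.
+sed -i 's/10\.0\.[0-9]\{1,3\}\.[0-9]\{1,3\}/REDACTED/g' docker-report.txt
+```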
+
+## Quick contribution tips and guidelines
+
+This section gives the experienced contributor some tips and guidelines.
+
+### Pull requests are always welcome
+
+Not sure if that typo is worth a pull request? Found a bug and know how to fix
+it? Do it! We will appreciate it. Any significant improvement should be
+documented as [a GitHub issue](https://github.com/docker/docker/issues) before
+anybody starts working on it.
+
+We are always thrilled to receive pull requests. We do our best to process them
+quickly. If your pull request is not accepted on the first try,
+don't get discouraged! Our contributor's guide explains [the review process we
+use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
+
+### Design and cleanup proposals
+
+You can propose new designs for existing Docker features. You can also design
+entirely new features. We really appreciate contributors who want to refactor or
+otherwise cleanup our project. For information on making these types of
+contributions, see [the advanced contribution
+section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
+the contributors guide.
+
+We try hard to keep Docker lean and focused. Docker can't do everything for
+everybody. This means that we might decide against incorporating a new feature.
+However, there might be a way to implement that feature *on top of* Docker.
+
+### Talking to other Docker users and contributors
+
+**Forums**: A public forum for users to discuss questions and explore current design patterns and
+best practices about Docker and related projects in the Docker Ecosystem. To participate,
+just log in with your Docker Hub account on https://forums.docker.com.
+
+**Internet Relay Chat (IRC)**: IRC is a direct line to our most knowledgeable Docker users; we have
+both the #docker and #docker-dev group on irc.freenode.net.
+IRC is a rich chat protocol but it can overwhelm new users. You can search
+our chat archives. Read our IRC quickstart guide for an easy way to get started.
+
+**Google Group**: The docker-dev group is for contributors and other people contributing to the Docker project.
+You can join them without a google account by sending an email to
+docker-dev+subscribe@googlegroups.com.
+After receiving the join-request message, you can simply reply to that to confirm the subscription.
+
+**Twitter**: You can follow Docker's Twitter feed
+to get updates on our products. You can also tweet us questions or just
+share blogs or stories.
+
+**Stack Overflow**: Stack Overflow has over 17000 Docker questions listed. We regularly
+monitor Docker questions, and so do many other knowledgeable Docker users.
+
+### Conventions
+
+Fork the repository and make changes on your fork in a feature branch:
+
+- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
+  the issue.
+- If it's a feature branch, create an enhancement issue to announce
+  your intentions, and name it XXXX-something where XXXX is the number of the
+  issue.
+
+Submit unit tests for your changes. Go has a great test framework built in; use
+it! Take a look at existing tests for inspiration. [Run the full test
+suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
+submitting a pull request.
+
+Update the documentation when creating or modifying features. Test your
+documentation changes for clarity, concision, and correctness, as well as a
+clean documentation build. See our contributors guide for [our style
+guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
+the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).
+
+Write clean code. Universally formatted code promotes ease of writing, reading,
+and maintenance. Always run `gofmt -s -w file.go` on each changed file before
+committing your changes. Most editors have plug-ins that do this automatically.
+
+Pull request descriptions should be as clear as possible and include a reference
+to all the issues that they address.
+
+Commit messages must start with a capitalized and short summary (max. 50 chars)
+written in the imperative, followed by an optional, more detailed explanatory
+text which is separated from the summary by an empty line.
+
+Code review comments may be added to your pull request. Discuss, then make the
+suggested modifications and push additional commits to your feature branch. Post
+a comment after pushing. New commits show up in the pull request automatically,
+but the reviewers are notified only when you comment.
+
+Pull requests must be cleanly rebased on top of master without multiple branches
+mixed into the PR.
+
+**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
+feature branch to update your pull request rather than `git merge master`.
+
+Before you make a pull request, squash your commits into logical units of work
+using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
+set of patches that should be reviewed together: for example, upgrading the
+version of a vendored dependency and taking advantage of its now available new
+feature constitute two separate units of work. Implementing a new function and
+calling it in another file constitute a single logical unit of work. The vast
+majority of submissions should have a single commit, so if in doubt: squash
+down to one.
+
+After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/).
+Include documentation
+changes in the same pull request so that a revert would remove all traces of
+the feature or fix.
+
+Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
+close an issue. Including references automatically closes the issue on a merge.
+
+Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
+from the Git history.
+
+Please see the [Coding Style](#coding-style) for further guidelines.
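+
+Strung together, the conventions above look roughly like this; a minimal sketch,
+where the issue number (1234), branch name, and file name are all hypothetical:
+
+```bash
+# Branch off master, named after the issue being fixed: XXXX-something.
+git checkout -b 1234-fix-volume-perms origin/master
+
+# Hack, then format every changed file before committing.
+gofmt -s -w volume/volume.go
+
+# Capitalized, imperative summary of at most 50 characters.
+git commit -m "Fix permission handling for shared volumes"
+
+# If master moved on, rebase (do not merge), squash to logical units,
+# and force-push the rewritten feature branch.
+git fetch origin
+git rebase -i origin/master
+git push -f origin 1234-fix-volume-perms
+```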
+
+### Merge approval
+
+Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance.
+
+A change requires LGTMs from an absolute majority of the maintainers of each
+component affected. For example, if a change affects `docs/` and `registry/`, it
+needs an absolute majority from the maintainers of `docs/` AND, separately, an
+absolute majority of the maintainers of `registry/`.
+
+For more details, see the [MAINTAINERS](MAINTAINERS) page.
+
+### Sign your work
+
+The sign-off is a simple line at the end of the explanation for the patch. Your
+signature certifies that you wrote the patch or otherwise have the right to pass
+it on as an open-source patch. The rules are pretty simple: if you can certify
+the below (from [developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+1 Letterman Drive
+Suite D4700
+San Francisco, CA, 94129
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
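+
+For example, with your identity configured once, git generates the
+Signed-off-by line for you (the name, email, and message here are illustrative):
+
+```bash
+# Tell git who you are; the sign-off line is built from these values.
+git config --global user.name "Joe Smith"
+git config --global user.email joe.smith@email.com
+
+# -s appends "Signed-off-by: Joe Smith <joe.smith@email.com>" automatically.
+git commit -s -m "Fix permission handling for shared volumes"
+
+# Forgot the sign-off? Amend the last commit in place.
+git commit --amend -s --no-edit
+```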
+
+### How can I become a maintainer?
+
+The procedures for adding new maintainers are explained in the
+global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
+file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
+repository.
+
+Don't forget: being a maintainer is a time investment. Make sure you
+will have time to make yourself available. You don't have to be a
+maintainer to make a difference on the project!
+
+## Docker community guidelines
+
+We want to keep the Docker community awesome, growing and collaborative. We need
+your help to keep it that way. To help with this we've come up with some general
+guidelines for the community as a whole:
+
+* Be nice: Be courteous, respectful and polite to fellow community members:
+  no regional, racial, gender, or other abuse will be tolerated. We like
+  nice people way better than mean ones!
+
+* Encourage diversity and participation: Make everyone in our community feel
+  welcome, regardless of their background and the extent of their
+  contributions, and do everything possible to encourage participation in
+  our community.
+
+* Keep it legal: Basically, don't get us in trouble. Share only content that
+  you own, do not share private or sensitive information, and don't break
+  the law.
+
+* Stay on topic: Make sure that you are posting to the correct channel and
+  avoid off-topic discussions. Remember when you update an issue or respond
+  to an email you are potentially sending to a large number of people. Please
+  consider this before you update. Also remember that nobody likes spam.
+
+* Don't send email to the maintainers: There's no need to send email to the
+  maintainers to ask them to investigate an issue or to take a look at a
+  pull request. Instead of sending an email, GitHub mentions should be
+  used to ping maintainers to review a pull request, a proposal or an
+  issue.
+
+### Guideline violations — 3 strikes method
+
+The point of this section is not to find opportunities to punish people, but we
+do need a fair way to deal with people who are making our community suck.
+
+1. First occurrence: We'll give you a friendly, but public reminder that the
+   behavior is inappropriate according to our guidelines.
+
+2. Second occurrence: We will send you a private message with a warning that
+   any additional violations will result in removal from the community.
+
+3. Third occurrence: Depending on the violation, we may need to delete or ban
+   your account.
+
+**Notes:**
+
+* Obvious spammers are banned on first occurrence. If we don't do this, we'll
+  have spam all over the place.
+
+* Violations are forgiven after 6 months of good behavior, and we won't hold a
+  grudge.
+
+* People who commit minor infractions will get some education rather than being
+  put through the 3 strikes process.
+
+* The rules apply equally to everyone in the community, no matter how much
+  you've contributed.
+
+* Extreme violations of a threatening, abusive, destructive or illegal nature
+  will be addressed immediately and are not subject to 3 strikes or forgiveness.
+
+* Contact abuse@docker.com to report abuse or appeal violations. In the case of
+  appeals, we know that mistakes happen, and we'll work with you to come up with a
+  fair solution if there has been a misunderstanding.
+
+## Coding Style
+
+Unless explicitly stated, we follow all coding guidelines from the Go
+community. While some of these standards may seem arbitrary, they somehow seem
+to result in a solid, consistent codebase.
+
+It is possible that the code base does not currently comply with these
+guidelines. We are not looking for a massive PR that fixes this, since that
+goes against the spirit of the guidelines. All new contributions should make a
+best effort to clean up and make the code base better than they left it.
+Obviously, apply your best judgement. Remember, the goal here is to make the
+code base easier for humans to navigate and understand. Always keep that in
+mind when nudging others to comply.
+
+The rules:
+
+1. All code should be formatted with `gofmt -s`.
+2. All code should pass the default levels of
+   [`golint`](https://github.com/golang/lint).
+3. All code should follow the guidelines covered in [Effective
+   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
+   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
+4. Comment the code. Tell us the why, the history and the context.
+5. Document _all_ declarations and methods, even private ones. Declare
+   expectations, caveats and anything else that may be important. If a type
+   gets exported, having the comments already there will ensure it's ready.
+6. Variable name length should be proportional to its context and no longer.
+   `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`.
+   In practice, short methods will have short variable names and globals will
+   have longer names.
+7. No underscores in package names. If you need a compound name, step back,
+   and re-examine why you need a compound name. If you still think you need a
+   compound name, lose the underscore.
+8. No utils or helpers packages. If a function is not general enough to
+   warrant its own package, it has not been written generally enough to be a
+   part of a util package. Just leave it unexported and well-documented.
+9. All tests should run with `go test` and outside tooling should not be
+   required. No, we don't need another unit testing framework. Assertion
+   packages are acceptable if they provide _real_ incremental value.
+10. Even though we call these "rules" above, they are actually just
+    guidelines. Since you've read all the rules, you now know that.
+
+If you are having trouble getting into the mood of idiomatic Go, we recommend
+reading through [Effective Go](https://golang.org/doc/effective_go.html). The
+[Go Blog](https://blog.golang.org) is also a great resource. Drinking the
+kool-aid is a lot easier than going thirsty.
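+
+As a rough pre-review self-check, the first two rules can be verified from the
+repository root with standard tooling (a sketch; it assumes golint is on your
+PATH and skips the vendor/ tree):
+
+```bash
+# Rule 1: gofmt -s -l lists files that are not gofmt'd; expect no output.
+test -z "$(gofmt -s -l . | grep -v vendor/)"
+
+# Rule 2: run golint at its default level over every non-vendored package.
+go list ./... | grep -v /vendor/ | xargs -L1 golint
+
+# The remaining rules are judgement calls, but go vet catches common mistakes.
+go vet $(go list ./... | grep -v /vendor/)
+```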
diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile
new file mode 100644
index 0000000000..ce2d702807
--- /dev/null
+++ b/vendor/github.com/docker/docker/Dockerfile
@@ -0,0 +1,246 @@
+# This file describes the standard way to build Docker, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker .
+#
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
+#
+# # Publish a release:
+# docker run --privileged \
+# -e AWS_S3_BUCKET=baz \
+# -e AWS_ACCESS_KEY=foo \
+# -e AWS_SECRET_KEY=bar \
+# -e GPG_PASSPHRASE=gloubiboulga \
+# docker hack/release.sh
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+# + +FROM debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Add zfs ppa +RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \ + || apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 +RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + apt-utils \ + aufs-tools \ + automake \ + bash-completion \ + binutils-mingw-w64 \ + bsdmainutils \ + btrfs-tools \ + build-essential \ + clang \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + gcc-mingw-w64 \ + git \ + iptables \ + jq \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libnl-3-dev \ + libprotobuf-c0-dev \ + libprotobuf-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + net-tools \ + pkg-config \ + protobuf-compiler \ + protobuf-c-compiler \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + ubuntu-zfs \ + xfsprogs \ + vim-common \ + libzfs-dev \ + tar \ + zip \ + --no-install-recommends \ + && pip install awscli==1.10.15 +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Configure the container for OSX cross compilation +ENV OSX_SDK MacOSX10.11.sdk +ENV OSX_CROSS_COMMIT a9317c18a3a457ca0a657f08cc4d0d43c6cf8953 +RUN set -x \ + && export OSXCROSS_PATH="/osxcross" \ + && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ + && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ + && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ + && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh +ENV PATH /osxcross/target/bin:$PATH + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Install Go +# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines +# will need updating, to avoid errors. Ping #docker-maintainers on IRC +# with a heads-up. 
+ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Compile Go for cross compilation +ENV DOCKER_CROSSPLATFORMS \ + linux/386 linux/arm \ + darwin/amd64 \ + freebsd/amd64 freebsd/386 freebsd/arm \ + windows/amd64 windows/386 \ + solaris/amd64 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install CRIU for checkpoint/restore support +ENV CRIU_VERSION 2.2 +RUN mkdir -p /usr/src/criu \ + && curl -sSL https://github.com/xemul/criu/archive/v${CRIU_VERSION}.tar.gz | tar -v -C /usr/src/criu/ -xz --strip-components=1 \ + && cd /usr/src/criu \ + && make \ + && make install-criu + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ 
+ && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \
+ && go install -v github.com/go-swagger/go-swagger/cmd/swagger
+
+# Set user.email so crosbymichael's in-container merge commits go smoothly
+RUN git config --global user.email 'docker-dummy@example.com'
+
+# Add an unprivileged user to be used for tests which need it
+RUN groupadd -r docker
+RUN useradd --create-home --gid docker unprivilegeduser
+
+VOLUME /var/lib/docker
+WORKDIR /go/src/github.com/docker/docker
+ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux
+
+# Let us use a .bashrc file
+RUN ln -sfv $PWD/.bashrc ~/.bashrc
+# Add integration helpers to bashrc
+RUN echo "source $PWD/hack/make/.integration-test-helpers" >> /etc/bash.bashrc
+
+# Register Docker's bash completion.
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker
+
+# Get useful and necessary Hub images so we can "docker load" locally instead of pulling
+COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/
+RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \
+ buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \
+ busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \
+ debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \
+ hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7
+# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is)
+
+# Install tomlv, vndr, runc, containerd, tini, docker-proxy
+# Please edit hack/dockerfile/install-binaries.sh to update them.
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
+COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy bindata
+
+# Wrap all commands in the "docker-in-docker" script to allow nested containers
+ENTRYPOINT ["hack/dind"]
+
+# Upload docker source
+COPY . /go/src/github.com/docker/docker
diff --git a/vendor/github.com/docker/docker/Dockerfile.aarch64 b/vendor/github.com/docker/docker/Dockerfile.aarch64
new file mode 100644
index 0000000000..6112f802f7
--- /dev/null
+++ b/vendor/github.com/docker/docker/Dockerfile.aarch64
@@ -0,0 +1,175 @@
+# This file describes the standard way to build Docker on aarch64, using docker
+#
+# Usage:
+#
+# # Assemble the full dev environment. This is slow the first time.
+# docker build -t docker -f Dockerfile.aarch64 .
+#
+# # Mount your source in an interactive container for quick testing:
+# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash
+#
+# # Run the test suite:
+# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py
+#
+# Note: AppArmor used to mess with privileged mode, but this is no longer
+# the case. Therefore, you don't have to disable it anymore.
+#
+
+FROM aarch64/ubuntu:wily
+
+# Packaged dependencies
+RUN apt-get update && apt-get install -y \
+ apparmor \
+ aufs-tools \
+ automake \
+ bash-completion \
+ btrfs-tools \
+ build-essential \
+ cmake \
+ createrepo \
+ curl \
+ dpkg-sig \
+ g++ \
+ gcc \
+ git \
+ iptables \
+ jq \
+ libapparmor-dev \
+ libc6-dev \
+ libcap-dev \
+ libltdl-dev \
+ libsqlite3-dev \
+ libsystemd-dev \
+ mercurial \
+ net-tools \
+ parallel \
+ pkg-config \
+ python-dev \
+ python-mock \
+ python-pip \
+ python-websocket \
+ gccgo \
+ iproute2 \
+ iputils-ping \
+ vim-common \
+ --no-install-recommends
+
+# Get lvm2 source for compiling statically
+ENV LVM2_VERSION 2.02.103
+RUN mkdir -p /usr/local/lvm2 \
+ && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
+ | tar -xzC /usr/local/lvm2 --strip-components=1
+# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
+
+# Fix platform enablement in lvm2 to support aarch64 properly
+RUN set -e \
+ && for f in config.guess config.sub; do \
+ curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
+ done
+# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
+
+# Compile and install lvm2
+RUN cd /usr/local/lvm2 \
+ && ./configure \
+ --build="$(gcc -print-multiarch)" \
+ --enable-static_link \
+ && make device-mapper \
+ && make install_device-mapper
+# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
+
+# Install seccomp: the version shipped in trusty is too old
+ENV SECCOMP_VERSION 2.3.1
+RUN set -x \
+ && export SECCOMP_PATH="$(mktemp -d)" \
+ && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+ | tar -xzC "$SECCOMP_PATH" --strip-components=1 \
+ && ( \
+ cd "$SECCOMP_PATH" \
+ && ./configure --prefix=/usr/local \
+ && make \
+ && make install \
+ && ldconfig \
+ ) \
+ && rm -rf "$SECCOMP_PATH"
+
+# Install Go
+# We don't have official binary tarballs for ARM64, either for Go or bootstrap,
+# so we use gccgo as bootstrap to build Go from source code.
+# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
+# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
+ENV GO_VERSION 1.7.5
+RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
+ && cd /usr/src/go/src \
+ && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
+
+ENV PATH /usr/src/go/bin:$PATH
+ENV GOPATH /go
+
+# Only install one version of the registry, because the old version, which
+# supports schema1 manifests, does not work on ARM64; we should skip the
+# integration-cli tests for schema1 manifests on ARM64.
+ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ + aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ + aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ + aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . 
/go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.armhf b/vendor/github.com/docker/docker/Dockerfile.armhf new file mode 100644 index 0000000000..1aebc166b3 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.armhf @@ -0,0 +1,182 @@ +# This file describes the standard way to build Docker on ARMv7, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.armhf . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM armhf/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + createrepo \ + curl \ + cmake \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends \ + && pip install awscli==1.10.15 + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install Go +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ + | tar -xzC /usr/local +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# We're building for armhf, which is ARMv7, so let's be explicit about that +ENV GOARCH arm +ENV GOARM 7 + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install seccomp: the version shipped in trusty is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf 
"$SECCOMP_PATH" + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ + armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ + armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ + armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.ppc64le b/vendor/github.com/docker/docker/Dockerfile.ppc64le new file mode 100644 index 0000000000..1f9f5006ff --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.ppc64le @@ -0,0 +1,188 @@ +# This file describes the standard way to build Docker on ppc64le, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.ppc64le . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. +# + +FROM ppc64le/debian:jessie + +# allow replacing httpredir or deb mirror +ARG APT_MIRROR=deb.debian.org +RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support ppc64le properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + + +# Install Go +# NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4 +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH 
/go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. +ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. 
+RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ + ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ + ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ + ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits +COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy + +# Wrap all commands in the "docker-in-docker" script to allow nested containers +ENTRYPOINT ["hack/dind"] + +# Upload docker source +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.s390x b/vendor/github.com/docker/docker/Dockerfile.s390x new file mode 100644 index 0000000000..ba94bc70aa --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.s390x @@ -0,0 +1,190 @@ +# This file describes the standard way to build Docker on s390x, using docker +# +# Usage: +# +# # Assemble the full dev environment. This is slow the first time. +# docker build -t docker -f Dockerfile.s390x . +# +# # Mount your source in an interactive container for quick testing: +# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash +# +# # Run the test suite: +# docker run --privileged docker hack/make.sh test-unit test-integration-cli test-docker-py +# +# Note: AppArmor used to mess with privileged mode, but this is no longer +# the case. Therefore, you don't have to disable it anymore. 
+# + +FROM s390x/gcc:6.1 + +# Packaged dependencies +RUN apt-get update && apt-get install -y \ + apparmor \ + aufs-tools \ + automake \ + bash-completion \ + btrfs-tools \ + build-essential \ + cmake \ + createrepo \ + curl \ + dpkg-sig \ + git \ + iptables \ + jq \ + net-tools \ + libapparmor-dev \ + libcap-dev \ + libltdl-dev \ + libsqlite3-dev \ + libsystemd-journal-dev \ + libtool \ + mercurial \ + pkg-config \ + python-dev \ + python-mock \ + python-pip \ + python-websocket \ + xfsprogs \ + tar \ + vim-common \ + --no-install-recommends + +# glibc in Debian has a bug specific to s390x that won't be fixed until Debian 8.6 is released +# - https://github.com/docker/docker/issues/24748 +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=890b7a4b33d482b5c768ab47d70758b80227e9bc +# - https://sourceware.org/git/?p=glibc.git;a=commit;h=2e807f29595eb5b1e5d0decc6e356a3562ecc58e +RUN echo 'deb http://httpredir.debian.org/debian jessie-proposed-updates main' >> /etc/apt/sources.list.d/pu.list \ + && apt-get update \ + && apt-get install -y libc6 \ + && rm -rf /var/lib/apt/lists/* + +# Install seccomp: the version shipped in jessie is too old +ENV SECCOMP_VERSION 2.3.1 +RUN set -x \ + && export SECCOMP_PATH="$(mktemp -d)" \ + && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ + | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ + && ( \ + cd "$SECCOMP_PATH" \ + && ./configure --prefix=/usr/local \ + && make \ + && make install \ + && ldconfig \ + ) \ + && rm -rf "$SECCOMP_PATH" + +# Get lvm2 source for compiling statically +ENV LVM2_VERSION 2.02.103 +RUN mkdir -p /usr/local/lvm2 \ + && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ + | tar -xzC /usr/local/lvm2 --strip-components=1 +# See https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags + +# Fix platform enablement in lvm2 to support s390x properly +RUN set -e \ + && for f in config.guess config.sub; do \ + curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ + done +# "arch.c:78:2: error: #error the arch code needs to know about your machine type" + +# Compile and install lvm2 +RUN cd /usr/local/lvm2 \ + && ./configure \ + --build="$(gcc -print-multiarch)" \ + --enable-static_link \ + && make device-mapper \ + && make install_device-mapper +# See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL + +ENV GO_VERSION 1.7.5 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ + | tar -xzC /usr/local + +ENV PATH /go/bin:/usr/local/go/bin:$PATH +ENV GOPATH /go + +# Dependency for golint +ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 +RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ + && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) + +# Grab Go's lint tool +ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 +RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ + && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ + && go install -v github.com/golang/lint/golint + +# Install two versions of the registry. The first is an older version that +# only supports schema1 manifests. The second is a newer version that supports +# both. This allows integration-cli tests to cover push/pull with both schema1 +# and schema2 manifests. 
+ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd +ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ + && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ + && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ + go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ + && rm -rf "$GOPATH" + +# Install notary and notary-server +ENV NOTARY_VERSION v0.4.2 +RUN set -x \ + && export GOPATH="$(mktemp -d)" \ + && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ + && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ + && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ + go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ + && rm -rf "$GOPATH" + +# Get the "docker-py" source so we can run their integration tests +ENV DOCKER_PY_COMMIT e2655f658408f9ad1f62abdef3eb6ed43c0cf324 +RUN git clone https://github.com/docker/docker-py.git /docker-py \ + && cd /docker-py \ + && git checkout -q $DOCKER_PY_COMMIT \ + && pip install -r test-requirements.txt + +# Set user.email so crosbymichael's in-container merge commits go smoothly +RUN git config --global user.email 'docker-dummy@example.com' + +# Add an unprivileged user to be used for tests which need it +RUN groupadd -r docker +RUN useradd --create-home --gid docker unprivilegeduser + +VOLUME /var/lib/docker +WORKDIR /go/src/github.com/docker/docker +ENV DOCKER_BUILDTAGS apparmor selinux seccomp + +# Let us use a .bashrc file +RUN ln -sfv $PWD/.bashrc ~/.bashrc + +# Register Docker's bash completion. +RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker + +# Get useful and necessary Hub images so we can "docker load" locally instead of pulling +COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ +RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ + s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ + s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ + s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ + s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 +# See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) + +# Install tomlv, vndr, runc, containerd, tini, docker-proxy +# Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
+COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy
+
+# Wrap all commands in the "docker-in-docker" script to allow nested containers
+ENTRYPOINT ["hack/dind"]
+
+# Upload docker source
+COPY . /go/src/github.com/docker/docker
diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple
new file mode 100644
index 0000000000..8eeb3d96bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/Dockerfile.simple
@@ -0,0 +1,73 @@
+# docker build -t docker:simple -f Dockerfile.simple .
+# docker run --rm docker:simple hack/make.sh dynbinary
+# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit
+# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli
+
+# This represents the bare minimum required to build and test Docker.
+
+FROM debian:jessie
+
+# allow replacing httpredir or deb mirror
+ARG APT_MIRROR=deb.debian.org
+RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
+
+# Compile and runtime deps
+# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies
+# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies
+RUN apt-get update && apt-get install -y --no-install-recommends \
+	btrfs-tools \
+	build-essential \
+	curl \
+	cmake \
+	gcc \
+	git \
+	libapparmor-dev \
+	libdevmapper-dev \
+	libsqlite3-dev \
+	\
+	ca-certificates \
+	e2fsprogs \
+	iptables \
+	procps \
+	xfsprogs \
+	xz-utils \
+	\
+	aufs-tools \
+	vim-common \
+	&& rm -rf /var/lib/apt/lists/*
+
+# Install seccomp: the version shipped in jessie is too old
+ENV SECCOMP_VERSION 2.3.1
+RUN set -x \
+	&& export SECCOMP_PATH="$(mktemp -d)" \
+	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
+		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
+	&& ( \
+		cd "$SECCOMP_PATH" \
+		&& ./configure --prefix=/usr/local \
+		&& make \
+		&& make install \
+		&& ldconfig \
+	) \
+	&& rm -rf "$SECCOMP_PATH"
+
+# Install Go
+# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
+# will need updating, to avoid errors. Ping #docker-maintainers on IRC
+# with a heads-up.
+ENV GO_VERSION 1.7.5
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
+	| tar -xzC /usr/local
+ENV PATH /go/bin:/usr/local/go/bin:$PATH
+ENV GOPATH /go
+ENV CGO_LDFLAGS -L/lib
+
+# Install runc, containerd, tini and docker-proxy
+# Please edit hack/dockerfile/install-binaries.sh to update them.
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
+COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
+RUN /tmp/install-binaries.sh runc containerd tini proxy
+
+ENV AUTO_GOPATH 1
+WORKDIR /usr/src/docker
+COPY .
/usr/src/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.solaris b/vendor/github.com/docker/docker/Dockerfile.solaris new file mode 100644 index 0000000000..bb342e5e6a --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.solaris @@ -0,0 +1,20 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +ENV DOCKER_CROSSPLATFORMS solaris/amd64 +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.windows b/vendor/github.com/docker/docker/Dockerfile.windows new file mode 100644 index 0000000000..652d07275e --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.windows @@ -0,0 +1,267 @@ +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016 or Windows 10. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 or Windows 10 with all Windows updates applied. The major +# build number must be at least 14393. This can be confirmed, for example, by +# running the following from an elevated PowerShell prompt - this sample output +# is from a fully up to date machine as at mid-November 2016: +# +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.447.amd64fre.rs1_release_inmarket.161102-0100 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md +# +# 4. If building in a Hyper-V VM: For Windows Server 2016 using Windows Server +# containers as the default option, it is recommended you have at least 1GB +# of memory assigned; For Windows 10 where Hyper-V Containers are employed, you +# should have at least 4GB of memory assigned. Note also, to run Hyper-V +# containers in a VM, it is necessary to configure the VM for nested virtualization. + +# ----------------------------------------------------------------------------------------- + + +# Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. +# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. 
+# >> Resolving deltas: 100% (123139/123139), done.
+# >> Checking connectivity... done.
+# >> Checking out files: 100% (3912/3912), done.
+# >> PS C:\>
+#
+#
+# 2. Change directory to the cloned docker sources:
+#
+# >> cd C:\go\src\github.com\docker\docker
+#
+#
+# 3. Build a docker image with the components required to build the docker binaries from source
+# by running one of the following:
+#
+# >> docker build -t nativebuildimage -f Dockerfile.windows .
+# >> docker build -t nativebuildimage -f Dockerfile.windows -m 2GB . (if using Hyper-V containers)
+#
+#
+# 4. Build the docker executable binaries by running one of the following:
+#
+# >> docker run --name binaries nativebuildimage hack\make.ps1 -Binary
+# >> docker run --name binaries -m 2GB nativebuildimage hack\make.ps1 -Binary (if using Hyper-V containers)
+#
+#
+# 5. Copy the binaries out of the container, replacing HostPath with an appropriate destination
+# folder on the host system where you want the binaries to be located.
+#
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\docker.exe C:\HostPath\docker.exe
+# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\dockerd.exe C:\HostPath\dockerd.exe
+#
+#
+# 6. (Optional) Remove the interim container holding the built executable binaries:
+#
+# >> docker rm binaries
+#
+#
+# 7. (Optional) Remove the image used for the container in which the executable
+# binaries are built. Tip - it may be useful to keep this image around if you need to
+# build multiple times. Then you can take advantage of the builder cache to have an
+# image which has all the components required to build the binaries already installed.
+#
+# >> docker rmi nativebuildimage
+#
+
+# -----------------------------------------------------------------------------------------
+
+
+# The validation tests can either run in a container, or directly on the host. To run in a
+# container, ensure you have created the nativebuildimage above. Then run one of the
+# following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run --rm nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat
+# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -DCO -PkgImports -GoFormat (if using Hyper-V containers)
+
+# To run the validation tests on the host, from the root of the repository, run the
+# following from a Windows PowerShell prompt (elevation is not required; note that Go
+# must be installed to run these tests):
+#
+# >> hack\make.ps1 -DCO -PkgImports -GoFormat
+
+# -----------------------------------------------------------------------------------------
+
+
+# To run unit tests, ensure you have created the nativebuildimage above. Then run one of
+# the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+# >> docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit (if using Hyper-V containers)
+
+# -----------------------------------------------------------------------------------------
+
+
+# To run all tests and binary build, ensure you have created the nativebuildimage above.
+# Then run one of the following from an (elevated) Windows PowerShell prompt:
+#
+# >> docker run nativebuildimage hack\make.ps1 -All
+# >> docker run -m 2GB nativebuildimage hack\make.ps1 -All (if using Hyper-V containers)
+
+# -----------------------------------------------------------------------------------------
+
+
+# Important notes:
+# ---------------
+#
+# Don't attempt to use a bind-mount to pass a local directory as the bundles target
+# directory. It does not work (golang attempts to follow a mapped folder incorrectly).
+# Instead, use docker cp as per the example.
+#
+# go.zip is not removed from the image as it is used by the Windows CI servers
+# to ensure the host and image are running consistent versions of go.
+#
+# Nanoserver support is a work in progress. Although the image will build if the
+# FROM statement is updated, it will not work when running autogen through hack\make.ps1.
+# It is suspected that the required GCC utilities (e.g. gcc, windres, windmc) silently
+# quit due to the use of console hooks which are not available.
+#
+# The docker integration tests do not currently run in a container on Windows, predominantly
+# due to Windows not supporting privileged mode, so anything using a volume would fail.
+# They (along with the rest of the docker CI suite) can be run using
+# https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
+#
+# -----------------------------------------------------------------------------------------
+
+
+# The number of build steps below is explicitly minimised to improve performance.
+FROM microsoft/windowsservercore
+
+# Use PowerShell as the default shell
+SHELL ["powershell", "-command"]
+
+# Environment variable notes:
+# - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
+# - FROM_DOCKERFILE is used for detection of building within a container.
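+# For instance (an illustrative sketch, not a step in this build), a PowerShell
+# script can branch on that variable to detect an in-container build:
+#   if ($Env:FROM_DOCKERFILE) { Write-Host 'Building inside the container' }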
+ENV GO_VERSION=1.7.5 ` + GIT_VERSION=2.11.0 ` + GOPATH=C:\go ` + FROM_DOCKERFILE=1 + +RUN ` + $ErrorActionPreference = 'Stop'; ` + $ProgressPreference = 'SilentlyContinue'; ` + ` + Function Test-Nano() { ` + $EditionId = (Get-ItemProperty -Path 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name 'EditionID').EditionId; ` + return (($EditionId -eq 'ServerStandardNano') -or ($EditionId -eq 'ServerDataCenterNano') -or ($EditionId -eq 'NanoServer')); ` + }` + ` + Function Download-File([string] $source, [string] $target) { ` + if (Test-Nano) { ` + $handler = New-Object System.Net.Http.HttpClientHandler; ` + $client = New-Object System.Net.Http.HttpClient($handler); ` + $client.Timeout = New-Object System.TimeSpan(0, 30, 0); ` + $cancelTokenSource = [System.Threading.CancellationTokenSource]::new(); ` + $responseMsg = $client.GetAsync([System.Uri]::new($source), $cancelTokenSource.Token); ` + $responseMsg.Wait(); ` + if (!$responseMsg.IsCanceled) { ` + $response = $responseMsg.Result; ` + if ($response.IsSuccessStatusCode) { ` + $downloadedFileStream = [System.IO.FileStream]::new($target, [System.IO.FileMode]::Create, [System.IO.FileAccess]::Write); ` + $copyStreamOp = $response.Content.CopyToAsync($downloadedFileStream); ` + $copyStreamOp.Wait(); ` + $downloadedFileStream.Close(); ` + if ($copyStreamOp.Exception -ne $null) { throw $copyStreamOp.Exception } ` + } ` + } else { ` + Throw ("Failed to download " + $source) ` + }` + } else { ` + $webClient = New-Object System.Net.WebClient; ` + $webClient.DownloadFile($source, $target); ` + } ` + } ` + ` + setx /M PATH $('C:\git\bin;C:\git\usr\bin;'+$Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + Write-Host INFO: Downloading git...; ` + $location='https://github.com/git-for-windows/git/releases/download/v'+$env:GIT_VERSION+'.windows.1/PortableGit-'+$env:GIT_VERSION+'-64-bit.7z.exe'; ` + Download-File $location C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') C:\go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip C:\gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip C:\runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip C:\binutils.zip; ` + ` + Write-Host INFO: Installing PS7Zip package...; ` + Install-Package PS7Zip -Force | Out-Null; ` + Write-Host INFO: Importing PS7Zip...; ` + Import-Module PS7Zip -Force; ` + New-Item C:\git -ItemType Directory | Out-Null ; ` + cd C:\git; ` + Write-Host INFO: Extracting git...; ` + Expand-7Zip C:\gitsetup.7z.exe | Out-Null; ` + cd C:\; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler 1 of 3...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 2 of 3...; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Write-Host INFO: Expanding compiler 3 of 3...; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` + Write-Host INFO: Removing downloaded files...; ` + Remove-Item C:\gcc.zip; ` + Remove-Item C:\runtime.zip; ` + Remove-Item C:\binutils.zip; ` + Remove-Item C:\gitsetup.7z.exe; ` + ` + Write-Host INFO: 
Creating source directory...; ` + New-Item -ItemType Directory -Path C:\go\src\github.com\docker\docker | Out-Null; ` + ` + Write-Host INFO: Configuring git core.autocrlf...; ` + C:\git\bin\git config --global core.autocrlf true; ` + ` + Write-Host INFO: Completed + +# Make PowerShell the default entrypoint +ENTRYPOINT ["powershell.exe"] + +# Set the working directory to the location of the sources +WORKDIR C:\go\src\github.com\docker\docker + +# Copy the sources into the container +COPY . . diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE index 9c8e20ab85..8f3fee627a 100644 --- a/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2017 Docker, Inc. + Copyright 2013-2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS new file mode 100644 index 0000000000..39bb8c1308 --- /dev/null +++ b/vendor/github.com/docker/docker/MAINTAINERS @@ -0,0 +1,376 @@ +# Docker maintainers file +# +# This file describes who runs the docker/docker project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant +# parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + + [Org."Core maintainers"] + + # The Core maintainers are the ghostbusters of the project: when there's a problem others + # can't solve, they show up and fix it with bizarre devices and weaponry. + # They have final say on technical implementation and coding style. + # They are ultimately responsible for quality in all its forms: usability polish, + # bugfixes, performance, stability, etc. When ownership can cleanly be passed to + # a subsystem, they are responsible for doing so and holding the + # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. + + # For each release (including minor releases), a "release captain" is assigned from the + # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure + # the release process is clear and up-to-date. + + people = [ + "aaronlehmann", + "akihirosuda", + "aluzzardi", + "anusha", + "coolljt0725", + "cpuguy83", + "crosbymichael", + "dnephin", + "duglin", + "estesp", + "icecrime", + "jhowardmsft", + "justincormack", + "lk4d4", + "mavenugo", + "mhbauer", + "mlaventure", + "mrjana", + "runcom", + "stevvooe", + "tianon", + "tibor", + "tonistiigi", + "unclejack", + "vdemeester", + "vieux" + ] + + [Org."Docs maintainers"] + + # TODO Describe the docs maintainers role. + + people = [ + "jamtur01", + "misty", + "sven", + "thajeztah" + ] + + [Org.Curators] + + # The curators help ensure that incoming issues and pull requests are properly triaged and + # that our various contribution and reviewing processes are respected. With their knowledge of + # the repository activity, they can also guide contributors to relevant material or + # discussions. + # + # They are neither code nor docs reviewers, so they are never expected to merge. 
They can
+	# however:
+	# - close an issue or pull request when it's an exact duplicate
+	# - close an issue or pull request when it's inappropriate or off-topic
+
+	people = [
+		"aboch",
+		"andrewhsu",
+		"ehazlett",
+		"mgoelzer",
+		"programmerq",
+		"thajeztah"
+	]
+
+	[Org.Alumni]
+
+	# This list contains maintainers that are no longer active on the project.
+	# It is thanks to these people that the project has become what it is today.
+	# Thank you!
+
+	people = [
+		# David Calavera contributed many features to Docker, such as an improved
+		# event system, dynamic configuration reloading, volume plugins, fancy
+		# new templating options, and an external client credential store. As a
+		# maintainer, David was release captain for Docker 1.8, and competing
+		# with Jess Frazelle to be "top dream killer".
+		# David is now doing amazing stuff as CTO for https://www.netlify.com,
+		# and tweets as @calavera.
+		"calavera",
+
+		# As a maintainer, Erik was responsible for the "builder", and
+		# started the first designs for the new networking model in
+		# Docker. Erik is now working on all kinds of plugins for Docker
+		# (https://github.com/contiv) and various open source projects
+		# in his own repository https://github.com/erikh. You may
+		# still stumble into him in our issue tracker, or on IRC.
+		"erikh",
+
+		# Jessica Frazelle, also known as the "Keyser Söze of containers",
+		# runs *everything* in containers. She started contributing to
+		# Docker with a (fun fun) change involving both iptables and regular
+		# expressions (coz, YOLO!) on July 10, 2014
+		# https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995.
+		# Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed
+		# many features and improvements, among which "seccomp profiles" (making
+		# containers a lot more secure). Besides being a maintainer, she
+		# set up the CI infrastructure for the project, giving everyone
+		# something to shout at if a PR failed ("noooo Janky!").
+		# Jess is currently working on the DCOS security team at Mesosphere,
+		# and contributing to various open source projects.
+		# Be sure you don't miss her talks at a conference near you (a must-see),
+		# read her blog at https://blog.jessfraz.com (a must-read), and
+		# check out her open source projects on GitHub https://github.com/jessfraz (a must-try).
+		"jessfraz",
+
+		# As a docs maintainer, Mary Anthony contributed greatly to the Docker
+		# docs. She wrote the Docker Contributor Guide and Getting Started
+		# Guides. She helped create a doc build system independent of the
+		# docker/docker project, and implemented a new docs.docker.com theme and
+		# nav for 2015 Dockercon. Fun fact: the most inherited layer in DockerHub
+		# public repositories was originally referenced in
+		# maryatdocker/docker-whale back in May 2015.
+		"moxiegirl",
+
+		# Vincent "vbatts!" Batts made his first contribution to the project
+		# in November 2013, to become a maintainer a few months later, on
+		# May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778).
+		# As a maintainer, Vincent made important contributions to core elements
+		# of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper).
+		# He also contributed the "tar-split" library, an important element
+		# for the content-addressable store.
+		# Vincent is currently a member of the Open Containers Initiative
+		# Technical Oversight Board (TOB), besides his work at Red Hat and
+		# Project Atomic.
You can still find him regularly hanging out in + # our repository and the #docker-dev and #docker-maintainers IRC channels + # for a chat, as he's always a lot of fun. + "vbatts", + + # Vishnu became a maintainer to help out on the daemon codebase and + # libcontainer integration. He's currently involved in the + # Open Containers Initiative, working on the specifications, + # besides his work on cAdvisor and Kubernetes for Google. + "vishh" + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + + [people.anusha] + Name = "Anusha Ragunathan" + Email = "anusha@docker.com" + GitHub = "anusha-ragunathan" + + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" + + [people.coolljt0725] + Name = "Lei Jitang" + Email = "leijitang@huawei.com" + GitHub = "coolljt0725" + + [people.cpuguy83] + Name = "Brian Goff" + Email = "cpuguy83@gmail.com" + Github = "cpuguy83" + + [people.crosbymichael] + Name = "Michael Crosby" + Email = "crosbymichael@gmail.com" + GitHub = "crosbymichael" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.duglin] + Name = "Doug Davis" + Email = "dug@us.ibm.com" + GitHub = "duglin" + + [people.ehazlett] + Name = "Evan Hazlett" + Email = "ejhazlett@gmail.com" + GitHub = "ehazlett" + + [people.erikh] + Name = "Erik Hollensbe" + Email = "erik@docker.com" + GitHub = "erikh" + + [people.estesp] + Name = "Phil Estes" + Email = "estesp@linux.vnet.ibm.com" + GitHub = "estesp" + + [people.icecrime] + Name = "Arnaud Porterie" + Email = "arnaud@docker.com" + GitHub = "icecrime" + + [people.jamtur01] + Name = "James Turnbull" + Email = "james@lovedthanlost.net" + GitHub = "jamtur01" + + [people.jhowardmsft] + Name = "John Howard" + Email = "jhoward@microsoft.com" + GitHub = "jhowardmsft" + + [people.jessfraz] + Name = "Jessie Frazelle" + Email = "jess@linux.com" + GitHub = "jessfraz" + + [people.justincormack] + Name = "Justin Cormack" + Email = "justin.cormack@docker.com" + GitHub = "justincormack" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.mavenugo] + Name = "Madhu Venugopal" + Email = "madhu@docker.com" + GitHub = "mavenugo" + + [people.mgoelzer] + Name = "Mike Goelzer" + Email = "mike.goelzer@docker.com" + GitHub = "mgoelzer" + + [people.mhbauer] + Name = "Morgan Bauer" + Email = "mbauer@us.ibm.com" + GitHub = "mhbauer" + + [people.misty] + Name = "Misty Stanley-Jones" + Email = "misty@docker.com" + GitHub = "mstanleyjones" + + [people.mlaventure] + Name = "Kenfe-Mickaël Laventure" + Email = "mickael.laventure@docker.com" + GitHub = "mlaventure" + + [people.moxiegirl] + Name = "Mary Anthony" + Email = "mary.anthony@docker.com" + GitHub = "moxiegirl" + + [people.mrjana] + Name = "Jana Radhakrishnan" + Email = "mrjana@docker.com" + GitHub = 
"mrjana" + + [people.programmerq] + Name = "Jeff Anderson" + Email = "jeff@docker.com" + GitHub = "programmerq" + + [people.runcom] + Name = "Antonio Murdaca" + Email = "runcom@redhat.com" + GitHub = "runcom" + + [people.shykes] + Name = "Solomon Hykes" + Email = "solomon@docker.com" + GitHub = "shykes" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" + + [people.sven] + Name = "Sven Dowideit" + Email = "SvenDowideit@home.org.au" + GitHub = "SvenDowideit" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.tianon] + Name = "Tianon Gravi" + Email = "admwiggin@gmail.com" + GitHub = "tianon" + + [people.tibor] + Name = "Tibor Vass" + Email = "tibor@docker.com" + GitHub = "tiborvass" + + [people.tonistiigi] + Name = "Tõnis Tiigi" + Email = "tonis@docker.com" + GitHub = "tonistiigi" + + [people.unclejack] + Name = "Cristian Staretu" + Email = "cristian.staretu@gmail.com" + GitHub = "unclejack" + + [people.vbatts] + Name = "Vincent Batts" + Email = "vbatts@redhat.com" + GitHub = "vbatts" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" + + [people.vishh] + Name = "Vishnu Kannan" + Email = "vishnuk@google.com" + GitHub = "vishh" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile new file mode 100644 index 0000000000..81bde6b4f6 --- /dev/null +++ b/vendor/github.com/docker/docker/Makefile @@ -0,0 +1,147 @@ +.PHONY: all binary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration-cli test-unit tgz validate win + +# set the graph driver as the current graphdriver if not set +DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) + +# get OS/Arch of docker engine +DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') +DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') + +# env vars passed through directly to Docker's build scripts +# to allow things like `make KEEPBUNDLE=1 binary` easily +# `project/PACKAGERS.md` have some limited documentation of some of these +DOCKER_ENVS := \ + -e BUILD_APT_MIRROR \ + -e BUILDFLAGS \ + -e KEEPBUNDLE \ + -e DOCKER_BUILD_ARGS \ + -e DOCKER_BUILD_GOGC \ + -e DOCKER_BUILD_PKGS \ + -e DOCKER_DEBUG \ + -e DOCKER_EXPERIMENTAL \ + -e DOCKER_GITCOMMIT \ + -e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \ + -e DOCKER_INCREMENTAL_BINARY \ + -e DOCKER_PORT \ + -e DOCKER_REMAP_ROOT \ + -e DOCKER_STORAGE_OPTS \ + -e DOCKER_USERLANDPROXY \ + -e TESTDIRS \ + -e TESTFLAGS \ + -e TIMEOUT \ + -e HTTP_PROXY \ + -e HTTPS_PROXY \ + -e NO_PROXY \ + -e http_proxy \ + -e https_proxy \ + -e no_proxy +# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds + +# to allow `make BIND_DIR=. 
shell` or `make BIND_DIR= test` +# (default to no bind mount if DOCKER_HOST is set) +# note: BINDDIR is supported for backwards-compatibility here +BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) +DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") + +# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. +# The volume will be cleaned up when the container is removed due to `--rm`. +# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. +DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docker/docker/bundles) + +# enable .go-pkg-cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set +PKGCACHE_DIR := $(if $(PKGCACHE_DIR),$(PKGCACHE_DIR),.go-pkg-cache) +PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo +DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(CURDIR)/$(PKGCACHE_DIR)/\1"@g'),$(DOCKER_MOUNT)) + +GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) +GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") +DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) +DOCKER_PORT_FORWARD := $(if $(DOCKER_PORT),-p "$(DOCKER_PORT)",) + +DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) $(DOCKER_PORT_FORWARD) +BUILD_APT_MIRROR := $(if $(DOCKER_BUILD_APT_MIRROR),--build-arg APT_MIRROR=$(DOCKER_BUILD_APT_MIRROR)) +export BUILD_APT_MIRROR + +# if this session isn't interactive, then we don't want to allocate a +# TTY, which would fail, but if it is interactive, we do want to attach +# so that the user can send e.g. ^C through. +INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) +ifeq ($(INTERACTIVE), 1) + DOCKER_FLAGS += -t +endif + +DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" + +default: binary + +all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' + +binary: build ## build the linux binaries + $(DOCKER_RUN_DOCKER) hack/make.sh binary + +build: bundles init-go-pkg-cache + docker build ${BUILD_APT_MIRROR} ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . 
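+
+# Example invocations of the wrapper above (illustrative, not exhaustive;
+# see the individual targets below for what each one does):
+#   make binary                # build the linux binaries inside the dev image
+#   make BIND_DIR=. shell      # start a shell with the whole source tree bind-mounted
+#   make DOCKER_PORT=2375 run  # run the daemon in a container, forwarding a port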
+ +bundles: + mkdir bundles + +cross: build ## cross build the binaries for darwin, freebsd and\nwindows + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross + +deb: build ## build the deb packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb + + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) + +init-go-pkg-cache: + mkdir -p $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^: ]*):[^ ]*@$(PKGCACHE_DIR)/\1@g') + +install: ## install the linux binaries + KEEPBUNDLE=1 hack/make.sh install-binary + +manpages: ## Generate man pages from go source and markdown + docker build -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man + docker run --rm \ + -v $(PWD):/go/src/github.com/docker/docker/ \ + docker-manpage-dev + +rpm: build ## build the rpm packages + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm + +run: build ## run the docker daemon in a container + $(DOCKER_RUN_DOCKER) sh -c "KEEPBUNDLE=1 hack/make.sh install-binary run" + +shell: build ## start a shell inside the build env + $(DOCKER_RUN_DOCKER) bash + +test: build ## run the unit, integration and docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py + +test-docker-py: build ## run the docker-py tests + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py + +test-integration-cli: build ## run the integration tests + $(DOCKER_RUN_DOCKER) hack/make.sh build-integration-test-binary dynbinary test-integration-cli + +test-unit: build ## run the unit tests + $(DOCKER_RUN_DOCKER) hack/make.sh test-unit + +tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries + $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz + +validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor + $(DOCKER_RUN_DOCKER) hack/validate/all + +win: build ## cross build the binary for windows + $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger:0.7.4 diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE index 0c74e15b05..8a37c1c7bc 100644 --- a/vendor/github.com/docker/docker/NOTICE +++ b/vendor/github.com/docker/docker/NOTICE @@ -1,5 +1,5 @@ Docker -Copyright 2012-2017 Docker, Inc. +Copyright 2012-2016 Docker, Inc. This product includes software developed at Docker, Inc. (https://www.docker.com). diff --git a/vendor/github.com/docker/docker/README.md b/vendor/github.com/docker/docker/README.md new file mode 100644 index 0000000000..0b33bdca0d --- /dev/null +++ b/vendor/github.com/docker/docker/README.md @@ -0,0 +1,304 @@ +Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) +============================ + +Docker is an open source project to pack, ship and run any application +as a lightweight container. + +Docker containers are both *hardware-agnostic* and *platform-agnostic*. +This means they can run anywhere, from your laptop to the largest +cloud compute instance and everything in between - and they don't require +you to use a particular language, framework or packaging system. 
That +makes them great building blocks for deploying and scaling web apps, +databases, and backend services without depending on a particular stack +or provider. + +Docker began as an open-source implementation of the deployment engine which +powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/), +a popular Platform-as-a-Service. It benefits directly from the experience +accumulated over several years of large-scale operation and support of hundreds +of thousands of applications and databases. + +![Docker logo](docs/static_files/docker-logo-compressed.png "Docker") + +## Security Disclosure + +Security is very important to us. If you have any issue regarding security, +please disclose the information responsibly by sending an email to +security@docker.com and not by creating a GitHub issue. + +## Better than VMs + +A common method for distributing applications and sandboxing their +execution is to use virtual machines, or VMs. Typical VM formats are +VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory +these formats should allow every developer to automatically package +their application into a "machine" for easy distribution and deployment. +In practice, that almost never happens, for a few reasons: + + * *Size*: VMs are very large which makes them impractical to store + and transfer. + * *Performance*: running VMs consumes significant CPU and memory, + which makes them impractical in many scenarios, for example local + development of multi-tier applications, and large-scale deployment + of cpu and memory-intensive applications on large numbers of + machines. + * *Portability*: competing VM environments don't play well with each + other. Although conversion tools do exist, they are limited and + add even more overhead. + * *Hardware-centric*: VMs were designed with machine operators in + mind, not software developers. As a result, they offer very + limited tooling for what developers need most: building, testing + and running their software. For example, VMs offer no facilities + for application versioning, monitoring, configuration, logging or + service discovery. + +By contrast, Docker relies on a different sandboxing method known as +*containerization*. Unlike traditional virtualization, containerization +takes place at the kernel level. Most modern operating system kernels +now support the primitives necessary for containerization, including +Linux with [openvz](https://openvz.org), +[vserver](http://linux-vserver.org) and more recently +[lxc](https://linuxcontainers.org/), Solaris with +[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), +and FreeBSD with +[Jails](https://www.freebsd.org/doc/handbook/jails.html). + +Docker builds on top of these low-level primitives to offer developers a +portable format and runtime environment that solves all four problems. +Docker containers are small (and their transfer can be optimized with +layers), they have basically zero memory and cpu overhead, they are +completely portable, and are designed from the ground up with an +application-centric design. + +Perhaps best of all, because Docker operates at the OS level, it can still be +run inside a VM! + +## Plays well with others + +Docker does not require you to buy into a particular programming +language, framework, packaging system, or configuration language. + +Is your application a Unix process? 
Does it use files, tcp connections, +environment variables, standard Unix streams and command-line arguments +as inputs and outputs? Then Docker can run it. + +Can your application's build be expressed as a sequence of such +commands? Then Docker can build it. + +## Escape dependency hell + +A common problem for developers is the difficulty of managing all +their application's dependencies in a simple and automated way. + +This is usually difficult for several reasons: + + * *Cross-platform dependencies*. Modern applications often depend on + a combination of system libraries and binaries, language-specific + packages, framework-specific modules, internal components + developed for another project, etc. These dependencies live in + different "worlds" and require different tools - these tools + typically don't work well with each other, requiring awkward + custom integrations. + + * *Conflicting dependencies*. Different applications may depend on + different versions of the same dependency. Packaging tools handle + these situations with various degrees of ease - but they all + handle them in different and incompatible ways, which again forces + the developer to do extra work. + + * *Custom dependencies*. A developer may need to prepare a custom + version of their application's dependency. Some packaging systems + can handle custom versions of a dependency, others can't - and all + of them handle it differently. + + +Docker solves the problem of dependency hell by giving the developer a simple +way to express *all* their application's dependencies in one place, while +streamlining the process of assembling them. If this makes you think of +[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't +*replace* your favorite packaging systems. It simply orchestrates +their use in a simple and repeatable way. How does it do that? With +layers. + +Docker defines a build as running a sequence of Unix commands, one +after the other, in the same container. Build commands modify the +contents of the container (usually by installing new files on the +filesystem), the next command modifies it some more, etc. Since each +build command inherits the result of the previous commands, the +*order* in which the commands are executed expresses *dependencies*. + +Here's a typical Docker build process: + +```bash +FROM ubuntu:12.04 +RUN apt-get update && apt-get install -y python python-pip curl +RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv +RUN cd helloflask-master && pip install -r requirements.txt +``` + +Note that Docker doesn't care *how* dependencies are built - as long +as they can be built by running a Unix command in a container. + + +Getting started +=============== + +Docker can be installed either on your computer for building applications or +on servers for running them. To get started, [check out the installation +instructions in the +documentation](https://docs.docker.com/engine/installation/). + +Usage examples +============== + +Docker can be used to run short-lived commands, long-running daemons +(app servers, databases, etc.), interactive shell sessions, etc. + +You can find a [list of real-world +examples](https://docs.docker.com/engine/examples/) in the +documentation. 
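+
+For instance, a quick tour might look like this (a sketch; the image and
+names are only illustrative):
+
+```bash
+# run a short-lived command in a throwaway container
+docker run --rm ubuntu:12.04 echo "hello world"
+
+# run a long-running daemon in the background, then follow its output
+docker run -d --name mydaemon ubuntu:12.04 sh -c 'while true; do echo hi; sleep 1; done'
+docker logs -f mydaemon
+
+# open an interactive shell session
+docker run -i -t ubuntu:12.04 bash
+```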
+ +Under the hood +-------------- + +Under the hood, Docker is built on the following components: + +* The + [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) + and + [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) + capabilities of the Linux kernel +* The [Go](https://golang.org) programming language +* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) +* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) + +Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) +====================== + +| **Master** (Linux) | **Experimental** (Linux) | **Windows** | **FreeBSD** | +|------------------|----------------------|---------|---------| +| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | + +Want to hack on Docker? Awesome! We have [instructions to help you get +started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). + +These instructions are probably not perfect, please let us know if anything +feels wrong or incomplete. Better yet, submit a PR and improve them yourself. + +Getting the development builds +============================== + +Want to run Docker from a master build? You can download +master builds at [master.dockerproject.org](https://master.dockerproject.org). +They are updated with each commit merged into the master branch. + +Don't know how to use that super cool new feature in the master build? Check +out the master docs at +[docs.master.dockerproject.org](http://docs.master.dockerproject.org). + +How the project is run +====================== + +Docker is a very, very active project. If you want to learn more about how it is run, +or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). + +We are always open to suggestions on process improvements, and are always looking for more maintainers. + +### Talking to other Docker users and contributors + + + + + + + + + + + + + + + + + + + + + + + + +
+| Channel | Details |
+|---------|---------|
+| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the `#docker` and `#docker-dev` groups on **irc.freenode.net**. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. |
+| Docker Community Forums | The Docker Engine group is for users of the Docker Engine project. |
+| Google Groups | The docker-dev group is for contributors and other people contributing to the Docker project. You can join this group without a Google account by sending an email to docker-dev+subscribe@googlegroups.com. You'll receive a join-request message; simply reply to the message to confirm your subscription. |
+| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
+| Stack Overflow | Stack Overflow has over 7000 Docker questions listed. We regularly monitor Docker questions and so do many other knowledgeable Docker users. |
+
+### Legal
+
+*Brought to you courtesy of our legal counsel. For more context,
+please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.*
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+
+Licensing
+=========
+Docker is licensed under the Apache License, Version 2.0. See
+[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full
+license text.
+
+Other Docker Related Projects
+=============================
+There are a number of projects under development that are based on Docker's
+core technology. These projects expand the tooling built around the
+Docker platform to broaden its application and utility.
+
+* [Docker Registry](https://github.com/docker/distribution): Registry
+server for Docker (hosting/delivery of repositories and images)
+* [Docker Machine](https://github.com/docker/machine): Machine management
+for a container-centric world
+* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering
+system
+* [Docker Compose](https://github.com/docker/compose) (formerly Fig):
+Define and run multi-container apps
+* [Kitematic](https://github.com/docker/kitematic): The easiest way to use
+Docker on Mac and Windows
+
+If you know of another project underway that should be listed here, please help
+us keep this list up-to-date by submitting a PR.
+
+Awesome-Docker
+==============
+You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there.
diff --git a/vendor/github.com/docker/docker/ROADMAP.md b/vendor/github.com/docker/docker/ROADMAP.md
new file mode 100644
index 0000000000..21fe06dba6
--- /dev/null
+++ b/vendor/github.com/docker/docker/ROADMAP.md
@@ -0,0 +1,118 @@
+Docker Engine Roadmap
+=====================
+
+### How should I use this document?
+
+This document provides a description of items that the project decided to prioritize. It should
+serve as a reference point for Docker contributors to understand where the project is going, and
+help determine if a contribution could be conflicting with longer-term plans.
+
+The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be
+refused (except for those mentioned as "frozen features" below)! We are always happy to receive
+patches for new cool features we haven't thought about, or didn't judge a priority. Please however
+understand that such patches might take longer for us to review.
+
+### How can I help?
+
+Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described
+in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
+goal is to split the workload in such a way that anybody can jump in and help. Please comment on an
+issue if you want to take it, to avoid duplicating effort! Similarly, if a maintainer is already
+assigned to an issue you'd like to participate in, pinging them on IRC or GitHub to offer your help
+is the best way to go.
+
+### How can I add something to the roadmap?
+
+The roadmap process is new to the Docker Engine: we are only beginning to structure and document the
+project objectives.
+Our immediate goal is to be more transparent, and to work with our community to
+focus our efforts on fewer prioritized topics.
+
+We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but
+we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we
+won't be accepting pull requests adding or removing items from this file.
+
+# 1. Features and refactoring
+
+## 1.1 Runtime improvements
+
+We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container
+execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional
+default libcontainer `execdriver`, but the Engine internals were not ready for this.
+
+As runC continued evolving, and the OCI specification along with it, we created
+[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC`
+instances. This is the new target for Engine integration, as it can entirely replace the whole
+`execdriver` architecture, and container monitoring along with it.
+
+Docker Engine will rely on a long-running `containerd` companion daemon for all container execution
+related operations. This could open the door in the future for Engine restarts without interrupting
+running containers.
+
+## 1.2 Plugins improvements
+
+Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks
+extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real
+world feedback before optimizing for any particular workflow.
+
+In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of
+plugins. This implies in particular making it trivially easy to distribute plugins as containers
+through any Registry instance, as well as solving the commonly heard pain points of plugins needing
+to be treated as somewhat special (being active at all time, started before any other user
+containers, and not as easily dismissed).
+
+## 1.3 Internal decoupling
+
+A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the
+API implementation has been refactored, and the Builder side of the daemon is now
+[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in
+the same repository.
+
+We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the
+runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support
+with the concept of "special" containers opens the door for bootstrapping more Engine internals
+using the same facilities.
+
+## 1.4 Cluster capable Engine
+
+The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent
+adding features such as multihost networking, and node discovery down at the Engine level. Yet, the
+Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm
+for that.
+
+We plan to complete this effort and make the Engine fully cluster capable. Multiple instances of the
+Docker Engine are already capable of discovering each other and establishing overlay networking for
+their containers to communicate; the next step is for a given Engine to gain the ability to dispatch
+work to another node in the cluster. This will be introduced in a backward compatible way, such that
+a `docker run` invocation on a particular node remains fully deterministic.
+
+# 2. Frozen features
+
+## 2.1 Docker exec
+
+We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a
+*debugging* feature, and which is also strongly dependent on the Runtime ingredient effort.
+
+## 2.2 Remote Registry Operations
+
+A large amount of work is ongoing in the area of image distribution and provenance. This includes
+moving to the V2 Registry API and heavily refactoring the code that powers these features. The
+desired result is more secure, reliable and easier to use image distribution.
+
+Part of the problem with this area of the code base is the lack of a stable and flexible interface.
+If new features are added that access the registry without solidifying these interfaces, achieving
+feature parity will continue to be elusive. While we get a handle on this situation, we are imposing
+a moratorium on new code that accesses the Registry API in commands that don't already make remote
+calls.
+
+Currently, only the following commands cause interaction with a remote registry:
+
+ - push
+ - pull
+ - run
+ - build
+ - search
+ - login
+
+In the interest of stabilizing the registry access model during this ongoing work, we are not
+accepting additions to other commands that will cause remote interaction with the Registry API. This
+moratorium will lift when the goals of the distribution project have been met.
diff --git a/vendor/github.com/docker/docker/VENDORING.md b/vendor/github.com/docker/docker/VENDORING.md
new file mode 100644
index 0000000000..3086f9d172
--- /dev/null
+++ b/vendor/github.com/docker/docker/VENDORING.md
@@ -0,0 +1,45 @@
+# Vendoring policies
+
+This document outlines recommended vendoring policies for Docker repositories.
+(For example, libnetwork is a Docker repo and logrus is not.)
+
+## Vendoring using tags
+
+Commit-ID based vendoring provides little or no information about the updates
+being vendored. To fix this, vendors will now require that repositories use
+annotated tags along with commit IDs to snapshot commits. Annotated tags by
+themselves are not sufficient, since the same tag can be force-updated to
+reference different commits.
+
+Each tag should:
+- Follow Semantic Versioning rules (see the "Semantic Versioning" section below)
+- Have a corresponding entry in the change tracking document.
+
+Each repo should:
+- Have a change tracking document between tags/releases, e.g. CHANGELOG.md or
+GitHub releases.
+
+The goal here is for consuming repos to be able to use the tag version and
+changelog updates to determine whether the vendoring will cause any breaking or
+backward incompatible changes. This also means that repos can declare a
+dependency on a package at a specific version or greater, up to the next major
+release, without encountering breaking changes.
+
+## Semantic Versioning
+Annotated version tags should follow Semantic Versioning policies.
+According to http://semver.org:
+
+"Given a version number MAJOR.MINOR.PATCH, increment the:
+ MAJOR version when you make incompatible API changes,
+ MINOR version when you add functionality in a backwards-compatible manner, and
+ PATCH version when you make backwards-compatible bug fixes.
+Additional labels for pre-release and build metadata are available as extensions
+to the MAJOR.MINOR.PATCH format."
+
+(A sketch of one way to check this contract appears in the appendix below.)
+
+## Vendoring cadence
+In order to avoid huge vendoring changes, it is recommended to have a regular
+cadence for vendoring updates, e.g. monthly.
+
+## Pre-merge vendoring tests
+All related repos will be vendored into docker/docker.
+CI on docker/docker should catch any breaking changes involving multiple repos.
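+
+## Appendix: checking tag compatibility (illustration)
+
+For illustration only (an editor's sketch, not upstream policy text), a
+consuming repo could verify that a proposed tag bump stays within the same
+major version, and is therefore expected to be backward compatible under the
+contract quoted above. `parseSemver` and `compatible` are hypothetical
+helpers, not part of any Docker repo:
+
+```go
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// parseSemver splits a "vMAJOR.MINOR.PATCH" tag into its numeric parts.
+// Pre-release and build-metadata handling is omitted for brevity.
+func parseSemver(tag string) (major, minor, patch int, err error) {
+	parts := strings.SplitN(strings.TrimPrefix(tag, "v"), ".", 3)
+	if len(parts) != 3 {
+		return 0, 0, 0, fmt.Errorf("not a MAJOR.MINOR.PATCH tag: %q", tag)
+	}
+	nums := make([]int, 3)
+	for i, p := range parts {
+		if nums[i], err = strconv.Atoi(p); err != nil {
+			return 0, 0, 0, fmt.Errorf("bad component %q in tag %q", p, tag)
+		}
+	}
+	return nums[0], nums[1], nums[2], nil
+}
+
+// compatible reports whether moving from tag a to tag b stays within the
+// same major version, i.e. whether the bump should be backward compatible
+// under the semver contract above.
+func compatible(a, b string) (bool, error) {
+	amaj, _, _, err := parseSemver(a)
+	if err != nil {
+		return false, err
+	}
+	bmaj, _, _, err := parseSemver(b)
+	if err != nil {
+		return false, err
+	}
+	return amaj == bmaj, nil
+}
+
+func main() {
+	ok, _ := compatible("v1.5.0", "v1.6.2") // minor bump: expected compatible
+	fmt.Println(ok)                         // true
+	ok, _ = compatible("v1.5.0", "v2.0.0")  // major bump: breaking changes allowed
+	fmt.Println(ok)                         // false
+}
+```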
diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION
new file mode 100644
index 0000000000..b50dd27dd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/VERSION
@@ -0,0 +1 @@
+1.13.1
diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md
new file mode 100644
index 0000000000..464e056958
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/README.md
@@ -0,0 +1,42 @@
+# Working on the Engine API
+
+The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon.
+
+It consists of various components in this repository:
+
+- `api/swagger.yaml` A Swagger definition of the API.
+- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this.
+- `cli/` The command-line client.
+- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
+- `daemon/` The daemon, which serves the API.
+
+## Swagger definition
+
+The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
+
+1. Automatically generate documentation.
+2. Automatically generate the Go server and client. (A work in progress.)
+3. Provide a machine-readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc.
+
+## Updating the API documentation
+
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to reflect the change in the documentation.
+
+The file is split into two main sections:
+
+- `definitions`, which defines re-usable objects used in requests and responses
+- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable)
+
+To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
+
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/).
+
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when you are making edits, to ensure they are valid.
+
+## Viewing the API documentation
+
+When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly.
+
+All the documentation generation is done in the documentation repository, [docker/docker.github.io](https://github.com/docker/docker.github.io). The Swagger definition is periodically vendored into that repository, but you can manually copy over the Swagger definition to test changes.
+
+Copy `api/swagger.yaml` in this repository to `engine/api/[VERSION_NUMBER]/swagger.yaml` in the documentation repository, overwriting what is already there.
Then, run `docker-compose up` in the documentation repository and browse to [http://localhost:4000/engine/api/](http://localhost:4000/engine/api/) when it finishes rendering. diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 0000000000..fd065d5abe --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,166 @@ +package api + +import ( + "encoding/json" + "encoding/pem" + "fmt" + "mime" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + + "github.com/Sirupsen/logrus" + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/system" + "github.com/docker/libtrust" +) + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion string = "1.26" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier string = "scratch" +) + +// byPortInfo is a temporary type used to sort types.Port by its fields +type byPortInfo []types.Port + +func (r byPortInfo) Len() int { return len(r) } +func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } +func (r byPortInfo) Less(i, j int) bool { + if r[i].PrivatePort != r[j].PrivatePort { + return r[i].PrivatePort < r[j].PrivatePort + } + + if r[i].IP != r[j].IP { + return r[i].IP < r[j].IP + } + + if r[i].PublicPort != r[j].PublicPort { + return r[i].PublicPort < r[j].PublicPort + } + + return r[i].Type < r[j].Type +} + +// DisplayablePorts returns formatted string representing open ports of container +// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp" +// it's used by command 'docker ps' +func DisplayablePorts(ports []types.Port) string { + type portGroup struct { + first uint16 + last uint16 + } + groupMap := make(map[string]*portGroup) + var result []string + var hostMappings []string + var groupMapKeys []string + sort.Sort(byPortInfo(ports)) + for _, port := range ports { + current := port.PrivatePort + portKey := port.Type + if port.IP != "" { + if port.PublicPort != current { + hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) + continue + } + portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) + } + group := groupMap[portKey] + + if group == nil { + groupMap[portKey] = &portGroup{first: current, last: current} + // record order that groupMap keys are created + groupMapKeys = append(groupMapKeys, portKey) + continue + } + if current == (group.last + 1) { + group.last = current + continue + } + + result = append(result, formGroup(portKey, group.first, group.last)) + groupMap[portKey] = &portGroup{first: current, last: current} + } + for _, portKey := range groupMapKeys { + g := groupMap[portKey] + result = append(result, formGroup(portKey, g.first, g.last)) + } + result = append(result, hostMappings...) 
+ return strings.Join(result, ", ") +} + +func formGroup(key string, start, last uint16) string { + parts := strings.Split(key, "/") + groupType := parts[0] + var ip string + if len(parts) > 1 { + ip = parts[0] + groupType = parts[1] + } + group := strconv.Itoa(int(start)) + if start != last { + group = fmt.Sprintf("%s-%d", group, last) + } + if ip != "" { + group = fmt.Sprintf("%s:%s->%s", ip, group, group) + } + return fmt.Sprintf("%s/%s", group, groupType) +} + +// MatchesContentType validates the content type against the expected one +func MatchesContentType(contentType, expectedType string) bool { + mimetype, _, err := mime.ParseMediaType(contentType) + if err != nil { + logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) + } + return err == nil && mimetype == expectedType +} + +// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, +// otherwise generates a new one +func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { + err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) + if err != nil { + return nil, err + } + trustKey, err := libtrust.LoadKeyFile(trustKeyPath) + if err == libtrust.ErrKeyFileDoesNotExist { + trustKey, err = libtrust.GenerateECP256PrivateKey() + if err != nil { + return nil, fmt.Errorf("Error generating key: %s", err) + } + encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) + if err != nil { + return nil, fmt.Errorf("Error serializing key: %s", err) + } + if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { + return nil, fmt.Errorf("Error saving key file: %s", err) + } + } else if err != nil { + return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) + } + return trustKey, nil +} + +func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { + if ext == ".json" || ext == ".jwk" { + encoded, err = json.Marshal(key) + if err != nil { + return nil, fmt.Errorf("unable to encode private key JWK: %s", err) + } + } else { + pemBlock, err := key.PEMBlock() + if err != nil { + return nil, fmt.Errorf("unable to encode private key PEM: %s", err) + } + encoded = pem.EncodeToMemory(pemBlock) + } + return +} diff --git a/vendor/github.com/docker/docker/api/common_test.go b/vendor/github.com/docker/docker/api/common_test.go new file mode 100644 index 0000000000..31d6f58253 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_test.go @@ -0,0 +1,341 @@ +package api + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "os" + + "github.com/docker/docker/api/types" +) + +type ports struct { + ports []types.Port + expected string +} + +// DisplayablePorts +func TestDisplayablePorts(t *testing.T) { + cases := []ports{ + { + []types.Port{ + { + PrivatePort: 9988, + Type: "tcp", + }, + }, + "9988/tcp"}, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, + }, + "9988/udp", + }, + { + []types.Port{ + { + IP: "0.0.0.0", + PrivatePort: 9988, + Type: "tcp", + }, + }, + "0.0.0.0:0->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, + }, + "4.3.2.1:8899->9988/tcp", + }, + { + []types.Port{ + { + IP: "4.3.2.1", + PrivatePort: 9988, + PublicPort: 9988, + Type: "tcp", + }, + }, + "4.3.2.1:9988->9988/tcp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + Type: "udp", + }, { + PrivatePort: 9988, + Type: 
"udp", + }, + }, + "9988/udp, 9988/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 9998, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 9999, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:9998-9999->9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PublicPort: 8887, + PrivatePort: 9998, + Type: "udp", + }, { + IP: "1.2.3.4", + PublicPort: 8888, + PrivatePort: 9999, + Type: "udp", + }, + }, + "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", + }, + { + []types.Port{ + { + PrivatePort: 9998, + Type: "udp", + }, { + PrivatePort: 9999, + Type: "udp", + }, + }, + "9998-9999/udp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, + }, + "9988/udp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 9988, + PublicPort: 8899, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", + }, + { + []types.Port{ + { + PrivatePort: 9988, + PublicPort: 8899, + Type: "udp", + }, { + IP: "1.2.3.4", + PrivatePort: 6677, + PublicPort: 7766, + Type: "tcp", + }, { + IP: "4.3.2.1", + PrivatePort: 2233, + PublicPort: 3322, + Type: "tcp", + }, + }, + "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", + }, + { + []types.Port{ + { + PrivatePort: 80, + Type: "tcp", + }, { + PrivatePort: 1024, + Type: "tcp", + }, { + PrivatePort: 80, + Type: "udp", + }, { + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "1.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 80, + PrivatePort: 1024, + Type: "udp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "tcp", + }, { + IP: "2.1.1.1", + PublicPort: 1024, + PrivatePort: 80, + Type: "udp", + }, + }, + "80/tcp, 80/udp, 1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", + }, + } + + for _, port := range cases { + actual := DisplayablePorts(port.ports) + if port.expected != actual { + t.Fatalf("Expected %s, got %s.", port.expected, actual) + } + } +} + +// MatchesContentType +func TestJsonContentType(t *testing.T) { + if !MatchesContentType("application/json", "application/json") { + t.Fail() + } + + if !MatchesContentType("application/json; charset=utf-8", "application/json") { + t.Fail() + } + + if MatchesContentType("dockerapplication/json", "application/json") { + t.Fail() + } +} + +// LoadOrCreateTrustKey +func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") + if err != nil { + t.Fatal(err) + } + + if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { + t.Fatalf("expected an error, got nothing.") + } + +} 
+ +func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { + tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(tmpKeyFolderPath) + + // Without the need to create the folder hierarchy + tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With the need to create the folder hierarchy as tmpKeyFie is in a path + // where some folders do not exist. + tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat(tmpKeyFile); err != nil { + t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) + } + + // With no path at all + defer os.Remove("keyfile") + if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { + t.Fatalf("expected a new key file, got : %v and %v", err, key) + } + + if _, err := os.Stat("keyfile"); err != nil { + t.Fatalf("Expected to find a file keyfile, got %v", err) + } +} + +func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { + tmpKeyFile := filepath.Join("fixtures", "keyfile") + + if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { + t.Fatalf("expected a key file, got : %v and %v", err, key) + } +} diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 0000000000..081e61c451 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api + +// MinVersion represents Minimum REST API version supported +const MinVersion string = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 0000000000..d930fa0720 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (eg docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 0000000000..f07a02737f --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 0000000000..d19e8c9ca8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,7785 @@ +# A Swagger 2.0 (a.k.a. 
OpenAPI) definition of the Engine API.
+#
+# This is used for generating API documentation and the types used by the
+# client/server. See api/README.md for more information.
+#
+# Some style notes:
+# - This file is used by ReDoc, which allows GitHub Flavored Markdown in
+#   descriptions.
+# - There is no maximum line length, for ease of editing and pretty diffs.
+# - operationIds are in the format "NounVerb", with a singular noun.
+
+swagger: "2.0"
+schemes:
+  - "http"
+  - "https"
+produces:
+  - "application/json"
+  - "text/plain"
+consumes:
+  - "application/json"
+  - "text/plain"
+basePath: "/v1.26"
+info:
+  title: "Docker Engine API"
+  version: "1.26"
+  x-logo:
+    url: "https://docs.docker.com/images/logo-docker-main.png"
+  description: |
+    The Engine API is an HTTP API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API.
+
+    Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls.
+
+    # Errors
+
+    The API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format:
+
+    ```
+    {
+      "message": "page not found"
+    }
+    ```
+
+    # Versioning
+
+    The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break.
+
+    For Docker Engine >= 1.13.1, the API version is 1.26. To lock to this version, you prefix the URL with `/v1.26`. For example, calling `/info` is the same as calling `/v1.26/info`.
+
+    Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine.
+
+    In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated and will be removed in a future version of Docker.
+
+    The API uses an open schema model, which means the server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.
+
+    This documentation is for version 1.26 of the API, which was introduced with Docker 1.13.1.
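+
+    For illustration only (an editor's sketch, not part of the upstream
+    definition), a Go client might issue such a version-prefixed call over
+    the daemon's default Unix socket as follows. The `http://docker` host is
+    an arbitrary placeholder, since the connection is dialed over the socket:
+
+    ```go
+    package main
+
+    import (
+        "context"
+        "fmt"
+        "io/ioutil"
+        "net"
+        "net/http"
+    )
+
+    func main() {
+        // Route every request over the local daemon socket instead of TCP.
+        client := &http.Client{Transport: &http.Transport{
+            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+                return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
+            },
+        }}
+
+        // The /v1.26 prefix locks the API version, as described above; the
+        // host part of the URL is ignored once the socket is dialed.
+        resp, err := client.Get("http://docker/v1.26/info")
+        if err != nil {
+            panic(err)
+        }
+        defer resp.Body.Close()
+
+        body, err := ioutil.ReadAll(resp.Body)
+        if err != nil {
+            panic(err)
+        }
+        fmt.Println(resp.Status, "-", len(body), "bytes of JSON")
+    }
+    ```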
+    Use this table to find documentation for previous versions of the API:
+
+    Docker version  | API version | Changes
+    ----------------|-------------|---------
+    1.13.0          | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
+    1.12.x          | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
+    1.11.x          | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
+    1.10.x          | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
+    1.9.x           | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
+    1.8.x           | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
+    1.7.x           | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
+    1.6.x           | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)
+
+    # Authentication
+
+    Authentication for registries is handled client-side. The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as an `X-Registry-Auth` header, as a Base64-encoded (JSON) string with the following structure:
+
+    ```
+    {
+      "username": "string",
+      "password": "string",
+      "email": "string",
+      "serveraddress": "string"
+    }
+    ```
+
+    The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required.
+
+    If you already have an identity token from the [`/auth` endpoint](#operation/SystemAuth), you can just pass this instead of credentials:
+
+    ```
+    {
+      "identitytoken": "9cbaf023786cd7..."
+    }
+    ```
+
+# The tags on paths define the menu sections in the ReDoc documentation, so
+# the usage of tags must make sense for that:
+# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For
+#   example, it is preferable to add a path to the "System" tag instead of
+#   creating a tag with a single path in it.
+# - The order of tags in this list defines the order in the menu.
+tags:
+  # Primary objects
+  - name: "Container"
+    x-displayName: "Containers"
+    description: |
+      Create and manage containers.
+  - name: "Image"
+    x-displayName: "Images"
+  - name: "Network"
+    x-displayName: "Networks"
+    description: |
+      Networks are user-defined networks that containers can be attached to. See the [networking documentation](https://docs.docker.com/engine/userguide/networking/) for more information.
+  - name: "Volume"
+    x-displayName: "Volumes"
+    description: |
+      Create and manage persistent storage that can be attached to containers.
+  - name: "Exec"
+    x-displayName: "Exec"
+    description: |
+      Run new commands inside running containers. See the [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) for more information.
+
+      To exec a command in a container, you first need to create an exec instance, then start it. These two API endpoints are wrapped up in a single command-line command, `docker exec`.
+ - name: "Secret" + x-displayName: "Secrets" + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. See [the swarm mode documentation](https://docs.docker.com/engine/swarm/) for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit of swarm. Swarm mode must be enabled for these endpoints to work. + # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." 
+ type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." 
+ type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. 
Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." 
+ PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs mounts, and their corresponding mount options. For example: `{ "/run": "rw,noexec,nosuid,size=65536k" }`. + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." 
+ type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]` + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." + type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `{}` inherit healthcheck from image or parent image + - `{"NONE"}` disable healthcheck + - `{"CMD", args...}` exec arguments directly + - `{"CMD-SHELL", command}` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriver: + description: "Information about this container's graph driver." 
+ type: "object" + properties: + Name: + type: "string" + Data: + type: "object" + additionalProperties: + type: "string" + + Image: + type: "object" + properties: + Id: + type: "string" + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + Comment: + type: "string" + Created: + type: "string" + Container: + type: "string" + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + Author: + type: "string" + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + Os: + type: "string" + Size: + type: "integer" + format: "int64" + VirtualSize: + type: "integer" + format: "int64" + GraphDriver: + $ref: "#/definitions/GraphDriver" + RootFS: + type: "object" + properties: + Type: + type: "string" + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." 
+ additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." 
+ type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + 
x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PropagatedMount + - Mounts + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + WorkDir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + AllowAllDevices: + type: "boolean" + x-nullable: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + diff_ids: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + 
Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "1.13.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." 
+ type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: "If set, generate a key and use it to lock data stored on the managers." + type: "boolean" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + EncryptionConfig: + AutoLockManagers: false + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TaskSpec: + description: "User modifiable task configuration." 
+ type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + DNSConfig: + description: "Specification for DNS related configurations in resolver configuration file (`resolv.conf`)." + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: "A list of internal resolver variables to be modified (e.g., `debug`, `ndots:3`, etc.)." + type: "array" + items: + type: "string" + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." + type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." 
+ type: "integer" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." 
+ type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." + type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." 
+ type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponse: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." 
+ type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: "Base64-url-safe-encoded secret data" + type: "array" + items: + type: "string" + Secret: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" +paths: + /containers/json: + get: + summary: "List containers" + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." + type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. + + Available filters: + - `exited=` containers with exit code of `` + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `id=` a container's ID + - `name=` a container's name + - `is-task=`(`true`|`false`) + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: 
"88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." 
+ type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." 
+ operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: "The status of the container. For example, `running` or `exited`." + type: "string" + Running: + description: "Whether this container is running." + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." + type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriver" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" 
+ Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + operationId: "ContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + type: "object" + properties: + Path: + description: "Path to file that has changed" + type: "string" + Kind: + description: "Kind of change" + type: "integer" + enum: + - 0 + - 1 + - 2 + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used for calculating the CPU usage percentage. It is not the same as the `cpu_stats` field. 
+ operationId: "ContainerStats" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." 
+ operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
+ + The simplest way to implement this protocol is the following: + + 1. Read 8 bytes. + 2. Choose `stdout` or `stderr` depending on the first byte. + 3. Extract the frame size from the last four bytes. + 4. Read the extracted size and output it on the correct output. + 5. Goto 1. + + ### Stream format when using a TTY + + When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`. + + operationId: "ContainerAttach" + produces: + - "application/vnd.docker.raw-stream" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + - name: "logs" + in: "query" + description: | + Replay previous logs from the container. + + This is useful for attaching to a container that has started and you want to output everything since the container started. + + If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output. + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Stream attached streams from the time the request was made onwards" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/attach/ws: + get: + summary: "Attach to a container via a websocket" + operationId: "ContainerAttachWebsocket" + responses: + 101: + description: "no error, hints proxy about hijacking" + 200: + description: "no error, no upgrade header found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
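Putting the five demultiplexing steps above into code: a Go sketch of the non-TTY frame reader, taking the 8-byte header, switching on the stream type, and copying the big-endian-sized payload to the right output:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// demux implements the framing described above: each frame is an 8-byte
// header {STREAM_TYPE, 0, 0, 0, SIZE1..SIZE4} followed by SIZE payload bytes.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	var hdr [8]byte
	for {
		if _, err := io.ReadFull(r, hdr[:]); err != nil {
			if err == io.EOF {
				return nil // clean end of stream
			}
			return err
		}
		var dst io.Writer
		switch hdr[0] {
		case 0, 1: // stdin (written on stdout) and stdout
			dst = stdout
		case 2: // stderr
			dst = stderr
		default:
			return fmt.Errorf("unknown stream type %d", hdr[0])
		}
		// SIZE1..SIZE4 are the big-endian uint32 payload size.
		size := binary.BigEndian.Uint32(hdr[4:8])
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// One hand-built stdout frame carrying "hi\n", for illustration.
	frame := append([]byte{1, 0, 0, 0, 0, 0, 0, 3}, []byte("hi\n")...)
	if err := demux(bytes.NewReader(frame), os.Stdout, os.Stderr); err != nil {
		panic(err)
	}
}
```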
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "ContainerArchiveHead" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." 
+ type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get an tar archive of a resource in the filesystem of container id." + operationId: "ContainerGetArchive" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "ContainerPutArchive" + consumes: + - "application/x-tar" + - "application/octet-stream" + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
+ schema:
+ type: "string"
+ tags: ["Container"]
+ /containers/prune:
+ post:
+ summary: "Delete stopped containers"
+ produces:
+ - "application/json"
+ operationId: "ContainerPrune"
+ parameters:
+ - name: "filters"
+ in: "query"
+ description: |
+ Filters to process on the prune list, encoded as JSON (a `map[string][]string`).
+
+ Available filters:
+ type: "string"
+ responses:
+ 200:
+ description: "No error"
+ schema:
+ type: "object"
+ properties:
+ ContainersDeleted:
+ description: "Container IDs that were deleted"
+ type: "array"
+ items:
+ type: "string"
+ SpaceReclaimed:
+ description: "Disk space reclaimed in bytes"
+ type: "integer"
+ format: "int64"
+ 500:
+ description: "Server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["Container"]
+ /images/json:
+ get:
+ summary: "List Images"
+ description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image."
+ operationId: "ImageList"
+ produces:
+ - "application/json"
+ responses:
+ 200:
+ description: "Summary image data for the images matching the query"
+ schema:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ examples:
+ application/json:
+ - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8"
+ ParentId: ""
+ RepoTags:
+ - "ubuntu:12.04"
+ - "ubuntu:precise"
+ RepoDigests:
+ - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787"
+ Created: 1474925151
+ Size: 103579269
+ VirtualSize: 103579269
+ SharedSize: 0
+ Labels: {}
+ Containers: 2
+ - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175"
+ ParentId: ""
+ RepoTags:
+ - "ubuntu:12.10"
+ - "ubuntu:quantal"
+ RepoDigests:
+ - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7"
+ - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3"
+ Created: 1403128455
+ Size: 172064416
+ VirtualSize: 172064416
+ SharedSize: 0
+ Labels: {}
+ Containers: 5
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "all"
+ in: "query"
+ description: "Show all images. Only images from a final layer (no children) are shown by default."
+ type: "boolean"
+ default: false
+ - name: "filters"
+ in: "query"
+ description: |
+ A JSON encoded value of the filters (a `map[string][]string`) to process on the images list.
+
+ Available filters:
+ - `dangling=true`
+ - `label=key` or `label="key=value"` of an image label
+ - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `reference`=(`<image-name>[:<tag>]`)
+ type: "string"
+ - name: "digests"
+ in: "query"
+ description: "Show digest information as a `RepoDigests` field on each image."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /build:
+ post:
+ summary: "Build an image"
+ description: |
+ Build an image from a tar archive with a `Dockerfile` in it.
+
+ The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
+
+ The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
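+
+ As an illustrative sketch of calling this endpoint from Go (not part of this specification: it assumes the daemon's default Unix socket at `/var/run/docker.sock`, and the `hello:latest` tag and one-line `Dockerfile` are made up for the example; error handling is abbreviated):
+
+ ```go
+ package main
+
+ import (
+     "archive/tar"
+     "bytes"
+     "context"
+     "fmt"
+     "io"
+     "net"
+     "net/http"
+     "os"
+ )
+
+ func main() {
+     // In-memory build context containing only a Dockerfile.
+     var buf bytes.Buffer
+     tw := tar.NewWriter(&buf)
+     df := []byte("FROM busybox\nCMD [\"echo\", \"hello\"]\n")
+     tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(df))})
+     tw.Write(df)
+     tw.Close()
+
+     // Reach the daemon over its Unix socket; the "docker" host name is a placeholder.
+     client := &http.Client{Transport: &http.Transport{
+         DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
+             return net.Dial("unix", "/var/run/docker.sock")
+         },
+     }}
+     resp, err := client.Post("http://docker/build?t=hello:latest", "application/tar", &buf)
+     if err != nil {
+         fmt.Fprintln(os.Stderr, err)
+         return
+     }
+     defer resp.Body.Close()
+     io.Copy(os.Stdout, resp.Body) // JSON progress messages, streamed
+ }
+ ```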
+ + The build is canceled if the client drops the connection by quitting or being killed. + operationId: "ImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "integer" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." 
+ type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/tar" + default: "application/tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. 
[See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + Id: + type: "string" + Created: + type: "integer" + format: "int64" + CreatedBy: + type: "string" + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + Comment: + type: "string" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." 
+ type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were referenced by that image. + + Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. 
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + 
application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "1.13.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.6.3" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.25" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: + - "text/plain" + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `container=` container name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=` volume name or ID + - `network=` network name or ID + - `daemon=` daemon name or ID + type: "string" + tags: ["System"] + /system/df: + get: + summary: "Get data usage information" + operationId: "SystemDataUsage" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+ Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `name=` Matches all or part of a volume name. + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches all or part of a volume + driver name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Containers: + 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: + EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" + MacAddress: "02:42:ac:11:00:02" + IPv4Address: "172.17.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" 
+ Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/sample-volume-plugin" + Tag: "latest" + Active: true + Settings: + Env: + - "DEBUG=0" + Args: null + Devices: null + Config: + Description: "A sample volume plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: "" + User: {} + Network: + Type: "" + Linux: + Capabilities: null + AllowAllDevices: false + Devices: null + Mounts: null + PropagatedMount: "/data" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. 
[See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
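
Since the descriptions above spread the install flow over two endpoints, here is a rough Go sketch of how a client might chain them: fetch the permissions with GET /plugins/privileges, then echo the accepted set back as the body of POST /plugins/pull. The daemon address, the PluginPrivilege struct name, and the minimal error handling are assumptions of the sketch, not part of the vendored spec:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "io/ioutil"
        "net/http"
        "net/url"
    )

    // PluginPrivilege mirrors the privilege objects described above;
    // the struct name is ours, the fields come from the spec.
    type PluginPrivilege struct {
        Name        string   `json:"Name"`
        Description string   `json:"Description"`
        Value       []string `json:"Value"`
    }

    func main() {
        base := "http://localhost:2375" // assumed daemon address
        ref := "tiborvass/sample-volume-plugin:latest"

        // 1. Fetch the privileges the plugin will ask for.
        resp, err := http.Get(base + "/plugins/privileges?name=" + url.QueryEscape(ref))
        if err != nil {
            panic(err)
        }
        body, _ := ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        var privs []PluginPrivilege
        if err := json.Unmarshal(body, &privs); err != nil {
            panic(err)
        }

        // 2. Pull the plugin, echoing back the privileges we accept.
        accepted, _ := json.Marshal(privs)
        resp, err = http.Post(base+"/plugins/pull?remote="+url.QueryEscape(ref),
            "application/json", bytes.NewReader(accepted))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("pull status:", resp.Status)
    }
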
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: "Describes a permission accepted by the user upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+
+ Available filters:
+ - `id=<node id>`
+ - `label=<engine label>`
+ - `membership=(accepted|pending)`
+ - `name=<node name>`
+ - `role=(manager|worker)`
+ type: "string"
+ tags: ["Node"]
+ /nodes/{id}:
+ get:
+ summary: "Inspect a node"
+ operationId: "NodeInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ $ref: "#/definitions/Node"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ tags: ["Node"]
+ delete:
+ summary: "Delete a node"
+ operationId: "NodeDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID or name of the node"
+ type: "string"
+ required: true
+ - name: "force"
+ in: "query"
+ description: "Force remove a node from the swarm"
+ default: false
+ type: "boolean"
+ tags: ["Node"]
+ /nodes/{id}/update:
+ post:
+ summary: "Update a node"
+ operationId: "NodeUpdate"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such node"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "The ID of the node"
+ type: "string"
+ required: true
+ - name: "body"
+ in: "body"
+ schema:
+ $ref: "#/definitions/NodeSpec"
+ - name: "version"
+ in: "query"
+ description: "The version number of the node object being updated. This is required to avoid conflicting writes."
+ type: "integer"
+ format: "int64"
+ required: true
+ tags: ["Node"]
+ /swarm:
+ get:
+ summary: "Inspect swarm"
+ operationId: "SwarmInspect"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ allOf:
+ - $ref: "#/definitions/ClusterInfo"
+ - type: "object"
+ properties:
+ JoinTokens:
+ description: "The tokens workers and managers need to join the swarm."
+ type: "object"
+ properties:
+ Worker:
+ description: "The token workers can use to join the swarm."
+ type: "string"
+ Manager:
+ description: "The token managers can use to join the swarm."
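
A short, illustrative Go sketch of reading those join tokens back out of GET /swarm; the daemon address, the version prefix, and the local swarmInfo struct are assumptions of the example, while the field names come from the schema above:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // swarmInfo picks out just the join tokens from the /swarm response.
    type swarmInfo struct {
        ID         string
        JoinTokens struct {
            Worker  string
            Manager string
        }
    }

    func main() {
        // Assumed daemon address and API version prefix.
        resp, err := http.Get("http://localhost:2375/v1.26/swarm")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        var info swarmInfo
        if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
            panic(err)
        }
        fmt.Println("worker token: ", info.JoinTokens.Worker)
        fmt.Println("manager token:", info.JoinTokens.Manager)
    }
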
+ type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + EncryptionConfig: + AutoLockManagers: false + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. 
This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." 
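
The unlock key and the unlock call pair up naturally; a hedged Go sketch of that round trip, with the daemon address assumed and error handling kept minimal:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        base := "http://localhost:2375" // assumed daemon address

        // 1. Read the current unlock key from a running manager.
        resp, err := http.Get(base + "/swarm/unlockkey")
        if err != nil {
            panic(err)
        }
        var key struct{ UnlockKey string }
        if err := json.NewDecoder(resp.Body).Decode(&key); err != nil {
            panic(err)
        }
        resp.Body.Close()

        // 2. After a daemon restart, present the key to unlock the manager.
        payload, _ := json.Marshal(map[string]string{"UnlockKey": key.UnlockKey})
        resp, err = http.Post(base+"/swarm/unlock", "application/json",
            bytes.NewReader(payload))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("unlock status:", resp.Status)
    }
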
+ type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `name=` + - `label=` + tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Delay: 30000000000 + Parallelism: 2 + FailureAction: "pause" + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." 
+ required: true
+ type: "string"
+ tags: ["Service"]
+ delete:
+ summary: "Delete a service"
+ operationId: "ServiceDelete"
+ responses:
+ 200:
+ description: "no error"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ tags: ["Service"]
+ /services/{id}/update:
+ post:
+ summary: "Update a service"
+ operationId: "ServiceUpdate"
+ consumes: ["application/json"]
+ produces: ["application/json"]
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ properties:
+ Warnings:
+ description: "Optional warning messages"
+ type: "array"
+ items:
+ type: "string"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ description: "ID or name of service."
+ required: true
+ type: "string"
+ - name: "body"
+ in: "body"
+ required: true
+ schema:
+ allOf:
+ - $ref: "#/definitions/ServiceSpec"
+ - type: "object"
+ example:
+ Name: "top"
+ TaskTemplate:
+ ContainerSpec:
+ Image: "busybox"
+ Args:
+ - "top"
+ Resources:
+ Limits: {}
+ Reservations: {}
+ RestartPolicy:
+ Condition: "any"
+ MaxAttempts: 0
+ Placement: {}
+ ForceUpdate: 0
+ Mode:
+ Replicated:
+ Replicas: 1
+ UpdateConfig:
+ Parallelism: 1
+ Monitor: 15000000000
+ MaxFailureRatio: 0.15
+ EndpointSpec:
+ Mode: "vip"
+
+ - name: "version"
+ in: "query"
+ description: "The version number of the service object being updated. This is required to avoid conflicting writes."
+ required: true
+ type: "integer"
+ - name: "registryAuthFrom"
+ in: "query"
+ type: "string"
+ description: "If the X-Registry-Auth header is not specified, this
+ parameter indicates where to find registry authorization credentials. The
+ valid values are `spec` and `previous-spec`."
+ default: "spec"
+ - name: "X-Registry-Auth"
+ in: "header"
+ description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)"
+ type: "string"
+
+ tags: ["Service"]
+ /services/{id}/logs:
+ get:
+ summary: "Get service logs"
+ description: |
+ Get `stdout` and `stderr` logs from a service.
+
+ **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers.
+ operationId: "ServiceLogs"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ - "application/json"
+ responses:
+ 101:
+ description: "logs returned as a stream"
+ schema:
+ type: "string"
+ format: "binary"
+ 200:
+ description: "logs returned as a string in response body"
+ schema:
+ type: "string"
+ 404:
+ description: "no such service"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such service: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the service"
+ type: "string"
+ - name: "details"
+ in: "query"
+ description: "Show extra details provided to logs."
+ type: "boolean"
+ default: false
+ - name: "follow"
+ in: "query"
+ description: |
+ Return the logs as a stream.
+
+ This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output.
For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: 
"10.255.0.1" + Addresses: + - "10.255.0.5/16" + + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. Available filters: + + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the secrets list. Available filters: + + - `names=` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created secret." 
+ type: "string" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + 406: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + example: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: "The spec of the secret to update. Currently, only the Labels field can be updated. All other fields must remain unchanged from the [SecretInspect endpoint](#operation/SecretInspect) response values." + - name: "version" + in: "query" + description: "The version number of the secret object being updated. This is required to avoid conflicting writes." 
+ type: "integer" + format: "int64" + required: true + tags: ["Secret"] diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index 93ca428540..7900d64f0d 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -4,10 +4,11 @@ import ( "bufio" "io" "net" + "os" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -74,7 +75,6 @@ type ContainerLogsOptions struct { ShowStdout bool ShowStderr bool Since string - Until string Timestamps bool Follow bool Tail string @@ -98,7 +98,6 @@ type ContainerStartOptions struct { // about files to copy into a container type CopyToContainerOptions struct { AllowOverwriteDirWithFile bool - CopyUIDGID bool } // EventsOptions holds parameters to filter events with. @@ -161,10 +160,9 @@ type ImageBuildOptions struct { ShmSize int64 Dockerfile string Ulimits []*units.Ulimit - // BuildArgs needs to be a *string instead of just a string so that - // we can tell the difference between "" (empty string) and no value - // at all (nil). See the parsing of buildArgs in - // api/server/router/build/build_routes.go for even more info. + // See the parsing of buildArgs in api/server/router/build/build_routes.go + // for an explaination of why BuildArgs needs to use *string instead of + // just a string BuildArgs map[string]*string AuthConfigs map[string]AuthConfig Context io.Reader @@ -177,10 +175,6 @@ type ImageBuildOptions struct { // specified here do not need to have a valid parent chain to match cache. CacheFrom []string SecurityOpt []string - ExtraHosts []string // List of extra hosts - Target string - SessionID string - Platform string } // ImageBuildResponse holds information @@ -193,22 +187,20 @@ type ImageBuildResponse struct { // ImageCreateOptions holds information to create images. type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. - Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry } // ImageImportSource holds source information for ImageImport type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. - SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. + Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) + SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) } // ImageImportOptions holds information to import images from the client host. type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image - Platform string // Platform is the target platform of the image + Tag string // Tag is the name to tag this image with. This attribute is deprecated. 
+ Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image } // ImageListOptions holds parameters to filter the list of images with. @@ -229,7 +221,6 @@ type ImagePullOptions struct { All bool RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry PrivilegeFunc RequestPrivilegeFunc - Platform string } // RequestPrivilegeFunc is a function interface that @@ -265,6 +256,18 @@ type ResizeOptions struct { Width uint } +// VersionResponse holds version information for the client and the server +type VersionResponse struct { + Client *Version + Server *Version +} + +// ServerOK returns true when the client could connect to the docker server +// and parse the information received. It returns false otherwise. +func (v VersionResponse) ServerOK() bool { + return v.Server != nil +} + // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { Filters filters.Args @@ -282,12 +285,6 @@ type ServiceCreateOptions struct { // // This field follows the format of the X-Registry-Auth header. EncodedRegistryAuth string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool } // ServiceCreateResponse contains the information returned to a client @@ -321,32 +318,14 @@ type ServiceUpdateOptions struct { // credentials if they are not given in EncodedRegistryAuth. Valid // values are "spec" and "previous-spec". RegistryAuthFrom string - - // Rollback indicates whether a server-side rollback should be - // performed. When this is set, the provided spec will be ignored. - // The valid values are "previous" and "none". An empty value is the - // same as "none". - Rollback string - - // QueryRegistry indicates whether the service update requires - // contacting a registry. A registry may be contacted to retrieve - // the image digest and manifest, which in turn can be used to update - // platform or other information about the service. - QueryRegistry bool } -// ServiceListOptions holds parameters to list services with. +// ServiceListOptions holds parameters to list services with. type ServiceListOptions struct { Filters filters.Args } -// ServiceInspectOptions holds parameters related to the "service inspect" -// operation. -type ServiceInspectOptions struct { - InsertDefaults bool -} - -// TaskListOptions holds parameters to list tasks with. +// TaskListOptions holds parameters to list tasks with. type TaskListOptions struct { Filters filters.Args } @@ -377,6 +356,15 @@ type PluginInstallOptions struct { Args []string } +// SecretRequestOption is a type for requesting secrets +type SecretRequestOption struct { + Source string + Target string + UID string + GID string + Mode os.FileMode +} + // SwarmUnlockKeyResponse contains the response for Engine API: // GET /swarm/unlockkey type SwarmUnlockKeyResponse struct { diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go index 55a03fc981..fc050e5dba 100644 --- a/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -7,12 +7,6 @@ import ( "github.com/docker/go-connections/nat" ) -// MinimumDuration puts a minimum on user configured duration. 
-// This is to prevent API error on time unit. For example, API may -// set 3 as healthcheck interval with intention of 3 seconds, but -// Docker interprets it as 3 nanoseconds. -const MinimumDuration = 1 * time.Millisecond - // HealthConfig holds configuration settings for the HEALTHCHECK feature. type HealthConfig struct { // Test is the test to perform to check that the container is healthy. @@ -25,9 +19,8 @@ type HealthConfig struct { Test []string `json:",omitempty"` // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. // Retries is the number of consecutive failures needed to consider a container as unhealthy. // Zero means inherit. diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go deleted file mode 100644 index 767945a532..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerChangeResponseItem container change response item -// swagger:model ContainerChangeResponseItem -type ContainerChangeResponseItem struct { - - // Kind of change - // Required: true - Kind uint8 `json:"Kind"` - - // Path to file that has changed - // Required: true - Path string `json:"Path"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go index c95023b814..d028e3b121 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -4,7 +4,7 @@ package container // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/generate-swagger-api.sh +// See hack/swagger-gen.sh // ---------------------------------------------------------------------------- // ContainerCreateCreatedBody container create created body diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go deleted file mode 100644 index 78bc37ee5e..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/container_top.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// ContainerTopOKBody container top o k body -// swagger:model 
ContainerTopOKBody -type ContainerTopOKBody struct { - - // Each process running in the container, where each is process is an array of values corresponding to the titles - // Required: true - Processes [][]string `json:"Processes"` - - // The ps column titles - // Required: true - Titles []string `json:"Titles"` -} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go index 2339366fbd..81ee12c678 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -4,7 +4,7 @@ package container // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/generate-swagger-api.sh +// See hack/swagger-gen.sh // ---------------------------------------------------------------------------- // ContainerUpdateOKBody container update o k body diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go index 47fb17578a..16cf335321 100644 --- a/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -4,25 +4,13 @@ package container // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/generate-swagger-api.sh +// See hack/swagger-gen.sh // ---------------------------------------------------------------------------- -// ContainerWaitOKBodyError container waiting error, if any -// swagger:model ContainerWaitOKBodyError -type ContainerWaitOKBodyError struct { - - // Details of an error - Message string `json:"Message,omitempty"` -} - // ContainerWaitOKBody container wait o k body // swagger:model ContainerWaitOKBody type ContainerWaitOKBody struct { - // error - // Required: true - Error *ContainerWaitOKBodyError `json:"Error"` - // Exit code of the container // Required: true StatusCode int64 `json:"StatusCode"` diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index 568cdcca93..0c82d625e8 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -10,6 +10,9 @@ import ( "github.com/docker/go-units" ) +// NetworkMode represents the container network stack. +type NetworkMode string + // Isolation represents the isolation technology of a container. 
The supported // values are platform specific type Isolation string @@ -20,101 +23,42 @@ func (i Isolation) IsDefault() bool { return strings.ToLower(string(i)) == "default" || string(i) == "" } -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -const ( - // IsolationEmpty is unspecified (same behavior as default) - IsolationEmpty = Isolation("") - // IsolationDefault is the default isolation mode on current daemon - IsolationDefault = Isolation("default") - // IsolationProcess is process isolation mode - IsolationProcess = Isolation("process") - // IsolationHyperV is HyperV isolation mode - IsolationHyperV = Isolation("hyperv") -) - // IpcMode represents the container ipc stack. type IpcMode string -// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +// IsPrivate indicates whether the container uses its private ipc stack. func (n IpcMode) IsPrivate() bool { - return n == "private" + return !(n.IsHost() || n.IsContainer()) } -// IsHost indicates whether the container shares the host's ipc namespace. +// IsHost indicates whether the container uses the host's ipc stack. func (n IpcMode) IsHost() bool { return n == "host" } -// IsShareable indicates whether the container's ipc namespace can be shared with another container. -func (n IpcMode) IsShareable() bool { - return n == "shareable" -} - -// IsContainer indicates whether the container uses another container's ipc namespace. +// IsContainer indicates whether the container uses a container's ipc stack. func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } -// IsNone indicates whether container IpcMode is set to "none". -func (n IpcMode) IsNone() bool { - return n == "none" -} - -// IsEmpty indicates whether container IpcMode is empty -func (n IpcMode) IsEmpty() bool { - return n == "" -} - -// Valid indicates whether the ipc mode is valid. +// Valid indicates whether the ipc stack is valid. func (n IpcMode) Valid() bool { - return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true } // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 && parts[0] == "container" { - return parts[1] - } - return "" -} - -// NetworkMode represents the container network stack. -type NetworkMode string - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsContainer indicates whether container uses a container network stack. 
-func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// ConnectedContainer is the id of the container which network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { parts := strings.SplitN(string(n), ":", 2) if len(parts) > 1 { return parts[1] @@ -122,14 +66,6 @@ func (n NetworkMode) ConnectedContainer() string { return "" } -//UserDefined indicates user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} - // UsernsMode represents userns mode in the container. type UsernsMode string @@ -287,17 +223,6 @@ func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount } -// LogMode is a type to define the available modes for logging -// These modes affect how logs are handled when log messages start piling up. -type LogMode string - -// Available logging modes -const ( - LogModeUnset = "" - LogModeBlocking LogMode = "blocking" - LogModeNonBlock LogMode = "non-blocking" -) - // LogConfig represents the logging configuration of the container. type LogConfig struct { Type string @@ -326,7 +251,6 @@ type Resources struct { CpusetCpus string // CpusetCpus 0-2, 0,1 CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container - DeviceCgroupRules []string // List of rule to be added to the device cgroup DiskQuota int64 // Disk limit (in bytes) KernelMemory int64 // Kernel memory limit (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) @@ -393,7 +317,7 @@ type HostConfig struct { // Applicable to Windows ConsoleSize [2]uint // Initial console size (height,width) - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) + Isolation Isolation // Isolation technology of the container (eg default, hyperv) // Contains container's resources (cgroups, ulimits) Resources @@ -403,4 +327,7 @@ type HostConfig struct { // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` + + // Custom init path + InitPath string `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 2d664d1c96..9fb79bed6f 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -2,11 +2,23 @@ package container +import "strings" + // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() } +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + // NetworkName returns the name of the network stack. func (n NetworkMode) NetworkName() string { if n.IsBridge() { @@ -35,7 +47,35 @@ func (n NetworkMode) IsHost() bool { return n == "host" } +// IsContainer indicates whether container uses a container network stack. 
+func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// ConnectedContainer is the id of the container which network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() } + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 3374d737f1..0ee332ba68 100644 --- a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,5 +1,25 @@ package container +import ( + "strings" +) + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsContainer indicates whether container uses a container network stack. +// Returns false as windows doesn't support this mode +func (n NetworkMode) IsContainer() bool { + return false +} + // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT func (n NetworkMode) IsBridge() bool { @@ -12,9 +32,30 @@ func (n NetworkMode) IsHost() bool { return false } +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// ConnectedContainer is the id of the container which network this container is connected to. 
+// Returns blank string on windows +func (n NetworkMode) ConnectedContainer() string { + return "" +} + // IsUserDefined indicates user-created network func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" } // IsValid indicates if an isolation technology is valid @@ -30,11 +71,17 @@ func (n NetworkMode) NetworkName() string { return "nat" } else if n.IsNone() { return "none" - } else if n.IsContainer() { - return "container" } else if n.IsUserDefined() { return n.UserDefined() } return "" } + +//UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go deleted file mode 100644 index 64820fe358..0000000000 --- a/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ /dev/null @@ -1,22 +0,0 @@ -package container - -// WaitCondition is a type used to specify a container state for which -// to wait. -type WaitCondition string - -// Possible WaitCondition Values. -// -// WaitConditionNotRunning (default) is used to wait for any of the non-running -// states: "created", "exited", "dead", "removing", or "removed". -// -// WaitConditionNextExit is used to wait for the next time the state changes -// to a non-running state. If the state is currently "created" or "exited", -// this would cause Wait() to block until either the container runs and exits -// or is removed. -// -// WaitConditionRemoved is used to wait for the container to be removed. -const ( - WaitConditionNotRunning WaitCondition = "not-running" - WaitConditionNextExit WaitCondition = "next-exit" - WaitConditionRemoved WaitCondition = "removed" -) diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go index e292565b6c..7129a65acf 100644 --- a/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/docker/docker/api/types/events/events.go @@ -13,14 +13,6 @@ const ( PluginEventType = "plugin" // VolumeEventType is the event type that volumes generate VolumeEventType = "volume" - // ServiceEventType is the event type that services generate - ServiceEventType = "service" - // NodeEventType is the event type that nodes generate - NodeEventType = "node" - // SecretEventType is the event type that secrets generate - SecretEventType = "secret" - // ConfigEventType is the event type that configs generate - ConfigEventType = "config" ) // Actor describes something that generates events, @@ -44,8 +36,6 @@ type Message struct { Type string Action string Actor Actor - // Engine events are local scope. Cluster events are swarm scope. 
- Scope string `json:"scope,omitempty"` Time int64 `json:"time,omitempty"` TimeNano int64 `json:"timeNano,omitempty"` diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go index d45d0528fb..e01a41deb8 100644 --- a/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -1,45 +1,38 @@ -/*Package filters provides tools for encoding a mapping of keys to a set of -multiple values. -*/ +// Package filters provides helper function to parse and handle command line +// filter, used for example in docker ps or docker images commands. package filters import ( "encoding/json" "errors" + "fmt" "regexp" "strings" "github.com/docker/docker/api/types/versions" ) -// Args stores a mapping of keys to a set of multiple values. +// Args stores filter arguments as map key:{map key: bool}. +// It contains an aggregation of the map of arguments (which are in the form +// of -f 'key=value') based on the key, and stores values for the same key +// in a map with string keys and boolean values. +// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' +// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} type Args struct { fields map[string]map[string]bool } -// KeyValuePair are used to initialize a new Args -type KeyValuePair struct { - Key string - Value string +// NewArgs initializes a new Args struct. +func NewArgs() Args { + return Args{fields: map[string]map[string]bool{}} } -// Arg creates a new KeyValuePair for initializing Args -func Arg(key, value string) KeyValuePair { - return KeyValuePair{Key: key, Value: value} -} - -// NewArgs returns a new Args populated with the initial args -func NewArgs(initialArgs ...KeyValuePair) Args { - args := Args{fields: map[string]map[string]bool{}} - for _, arg := range initialArgs { - args.Add(arg.Key, arg.Value) - } - return args -} - -// ParseFlag parses a key=value string and adds it to an Args. +// ParseFlag parses the argument to the filter flag. Like // -// Deprecated: Use Args.Add() +// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` +// +// If prev map is provided, then it is appended to, and returned. By default a new +// map is created. func ParseFlag(arg string, prev Args) (Args, error) { filters := prev if len(arg) == 0 { @@ -60,95 +53,74 @@ func ParseFlag(arg string, prev Args) (Args, error) { return filters, nil } -// ErrBadFormat is an error returned when a filter is not in the form key=value -// -// Deprecated: this error will be removed in a future version +// ErrBadFormat is an error returned in case of bad format for a filter. var ErrBadFormat = errors.New("bad format of filter (expected name=value)") -// ToParam encodes the Args as args JSON encoded string -// -// Deprecated: use ToJSON +// ToParam packs the Args into a string for easy transport from client to server. 
func ToParam(a Args) (string, error) { - return ToJSON(a) -} - -// MarshalJSON returns a JSON byte representation of the Args -func (args Args) MarshalJSON() ([]byte, error) { - if len(args.fields) == 0 { - return []byte{}, nil - } - return json.Marshal(args.fields) -} - -// ToJSON returns the Args as a JSON encoded string -func ToJSON(a Args) (string, error) { + // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } - buf, err := json.Marshal(a) - return string(buf), err + + buf, err := json.Marshal(a.fields) + if err != nil { + return "", err + } + return string(buf), nil } -// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 -// then the encoded format will use an older legacy format where the values are a -// list of strings, instead of a set. -// -// Deprecated: Use ToJSON +// ToParamWithVersion packs the Args into a string for easy transport from client to server. +// The generated string will depend on the specified version (corresponding to the API version). func ToParamWithVersion(version string, a Args) (string, error) { + // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } + // for daemons older than v1.10, filter must be of the form map[string][]string + buf := []byte{} + err := errors.New("") if version != "" && versions.LessThan(version, "1.22") { - buf, err := json.Marshal(convertArgsToSlice(a.fields)) - return string(buf), err + buf, err = json.Marshal(convertArgsToSlice(a.fields)) + } else { + buf, err = json.Marshal(a.fields) } - - return ToJSON(a) + if err != nil { + return "", err + } + return string(buf), nil } -// FromParam decodes a JSON encoded string into Args -// -// Deprecated: use FromJSON +// FromParam unpacks the filter Args. func FromParam(p string) (Args, error) { - return FromJSON(p) -} - -// FromJSON decodes a JSON encoded string into Args -func FromJSON(p string) (Args, error) { - args := NewArgs() - - if p == "" { - return args, nil - } - - raw := []byte(p) - err := json.Unmarshal(raw, &args) - if err == nil { - return args, nil + if len(p) == 0 { + return NewArgs(), nil } - // Fallback to parsing arguments in the legacy slice format - deprecated := map[string][]string{} - if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { - return args, err - } - - args.fields = deprecatedArgs(deprecated) - return args, nil -} + r := strings.NewReader(p) + d := json.NewDecoder(r) -// UnmarshalJSON populates the Args from JSON encode bytes -func (args Args) UnmarshalJSON(raw []byte) error { - if len(raw) == 0 { - return nil + m := map[string]map[string]bool{} + if err := d.Decode(&m); err != nil { + r.Seek(0, 0) + + // Allow parsing old arguments in slice format. + // Because other libraries might be sending them in this format. + deprecated := map[string][]string{} + if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { + m = deprecatedArgs(deprecated) + } else { + return NewArgs(), err + } } - return json.Unmarshal(raw, &args.fields) + return Args{m}, nil } -// Get returns the list of values associated with the key -func (args Args) Get(key string) []string { - values := args.fields[key] +// Get returns the list of values associates with a field. +// It returns a slice of strings to keep backwards compatibility with old code. 
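ToParam and ToParamWithVersion above choose between two JSON shapes for the same filter set: a map of value sets for API >= 1.22, and a map of string lists for older daemons. A self-contained sketch of the two encodings (the "label" field here is hypothetical; only the two shapes come from the code above):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Internal representation: key -> set of values.
	fields := map[string]map[string]bool{"label": {"a=1": true}}

	modern, _ := json.Marshal(fields) // API >= 1.22

	// Legacy shape for daemons older than API 1.22: key -> []string,
	// mirroring what convertArgsToSlice produces.
	legacy := map[string][]string{}
	for k, set := range fields {
		for v := range set {
			legacy[k] = append(legacy[k], v)
		}
	}
	old, _ := json.Marshal(legacy)

	fmt.Println(string(modern)) // {"label":{"a=1":true}}
	fmt.Println(string(old))    // {"label":["a=1"]}
}

FromParam accepts either shape, trying the set form first and falling back to the list form, which keeps older clients working against this version of the package.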
+func (filters Args) Get(field string) []string { + values := filters.fields[field] if values == nil { return make([]string, 0) } @@ -159,34 +131,37 @@ func (args Args) Get(key string) []string { return slice } -// Add a new value to the set of values -func (args Args) Add(key, value string) { - if _, ok := args.fields[key]; ok { - args.fields[key][value] = true +// Add adds a new value to a filter field. +func (filters Args) Add(name, value string) { + if _, ok := filters.fields[name]; ok { + filters.fields[name][value] = true } else { - args.fields[key] = map[string]bool{value: true} + filters.fields[name] = map[string]bool{value: true} } } -// Del removes a value from the set -func (args Args) Del(key, value string) { - if _, ok := args.fields[key]; ok { - delete(args.fields[key], value) - if len(args.fields[key]) == 0 { - delete(args.fields, key) +// Del removes a value from a filter field. +func (filters Args) Del(name, value string) { + if _, ok := filters.fields[name]; ok { + delete(filters.fields[name], value) + if len(filters.fields[name]) == 0 { + delete(filters.fields, name) } } } -// Len returns the number of keys in the mapping -func (args Args) Len() int { - return len(args.fields) +// Len returns the number of fields in the arguments. +func (filters Args) Len() int { + return len(filters.fields) } -// MatchKVList returns true if all the pairs in sources exist as key=value -// pairs in the mapping at key, or if there are no values at key. -func (args Args) MatchKVList(key string, sources map[string]string) bool { - fieldValues := args.fields[key] +// MatchKVList returns true if the values for the specified field matches the ones +// from the sources. +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'label' and sources are {'label1': '1', 'label2': '2'} +// it returns true. +func (filters Args) MatchKVList(field string, sources map[string]string) bool { + fieldValues := filters.fields[field] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { @@ -197,8 +172,8 @@ func (args Args) MatchKVList(key string, sources map[string]string) bool { return false } - for value := range fieldValues { - testKV := strings.SplitN(value, "=", 2) + for name2match := range fieldValues { + testKV := strings.SplitN(name2match, "=", 2) v, ok := sources[testKV[0]] if !ok { @@ -212,13 +187,16 @@ func (args Args) MatchKVList(key string, sources map[string]string) bool { return true } -// Match returns true if any of the values at key match the source string -func (args Args) Match(field, source string) bool { - if args.ExactMatch(field, source) { +// Match returns true if the values for the specified field matches the source string +// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, +// field is 'image.name' and source is 'ubuntu' +// it returns true. +func (filters Args) Match(field, source string) bool { + if filters.ExactMatch(field, source) { return true } - fieldValues := args.fields[field] + fieldValues := filters.fields[field] for name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { @@ -231,9 +209,9 @@ func (args Args) Match(field, source string) bool { return false } -// ExactMatch returns true if the source matches exactly one of the values. -func (args Args) ExactMatch(key, source string) bool { - fieldValues, ok := args.fields[key] +// ExactMatch returns true if the source matches exactly one of the filters. 
+func (filters Args) ExactMatch(field, source string) bool { + fieldValues, ok := filters.fields[field] //do not filter if there is no filter set or cannot determine filter if !ok || len(fieldValues) == 0 { return true @@ -243,15 +221,14 @@ func (args Args) ExactMatch(key, source string) bool { return fieldValues[source] } -// UniqueExactMatch returns true if there is only one value and the source -// matches exactly the value. -func (args Args) UniqueExactMatch(key, source string) bool { - fieldValues := args.fields[key] +// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. +func (filters Args) UniqueExactMatch(field, source string) bool { + fieldValues := filters.fields[field] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } - if len(args.fields[key]) != 1 { + if len(filters.fields[field]) != 1 { return false } @@ -259,14 +236,14 @@ func (args Args) UniqueExactMatch(key, source string) bool { return fieldValues[source] } -// FuzzyMatch returns true if the source matches exactly one value, or the -// source has one of the values as a prefix. -func (args Args) FuzzyMatch(key, source string) bool { - if args.ExactMatch(key, source) { +// FuzzyMatch returns true if the source matches exactly one of the filters, +// or the source has one of the filters as a prefix. +func (filters Args) FuzzyMatch(field, source string) bool { + if filters.ExactMatch(field, source) { return true } - fieldValues := args.fields[key] + fieldValues := filters.fields[field] for prefix := range fieldValues { if strings.HasPrefix(source, prefix) { return true @@ -275,47 +252,30 @@ func (args Args) FuzzyMatch(key, source string) bool { return false } -// Include returns true if the key exists in the mapping -// -// Deprecated: use Contains -func (args Args) Include(field string) bool { - _, ok := args.fields[field] - return ok -} - -// Contains returns true if the key exists in the mapping -func (args Args) Contains(field string) bool { - _, ok := args.fields[field] +// Include returns true if the name of the field to filter is in the filters. +func (filters Args) Include(field string) bool { + _, ok := filters.fields[field] return ok } -type invalidFilter string - -func (e invalidFilter) Error() string { - return "Invalid filter '" + string(e) + "'" -} - -func (invalidFilter) InvalidParameter() {} - -// Validate compared the set of accepted keys against the keys in the mapping. -// An error is returned if any mapping keys are not in the accepted set. -func (args Args) Validate(accepted map[string]bool) error { - for name := range args.fields { +// Validate ensures that all the fields in the filter are valid. +// It returns an error as soon as it finds an invalid field. +func (filters Args) Validate(accepted map[string]bool) error { + for name := range filters.fields { if !accepted[name] { - return invalidFilter(name) + return fmt.Errorf("Invalid filter '%s'", name) } } return nil } -// WalkValues iterates over the list of values for a key in the mapping and calls -// op() for each value. If op returns an error the iteration stops and the -// error is returned. -func (args Args) WalkValues(field string, op func(value string) error) error { - if _, ok := args.fields[field]; !ok { +// WalkValues iterates over the list of filtered values for a field. +// It stops the iteration if it finds an error and it returns that error. 
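ExactMatch, Match, and FuzzyMatch above apply three different readings to the same stored filter value: set membership, regexp match, and literal prefix. A quick standalone comparison (the value "pause*" is illustrative):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	filter, source := "pause*", "paused"

	exact := filter == source                   // ExactMatch: set membership only
	re, _ := regexp.MatchString(filter, source) // Match: "paus" plus zero or more 'e', anywhere
	prefix := strings.HasPrefix(source, filter) // FuzzyMatch fallback: literal prefix

	fmt.Println(exact, re, prefix) // false true false
}

A shell-style glob like "pause*" therefore matches under Match only because "*" happens to be regexp syntax too, a subtlety the parse_test.go cases further down exercise with patterns like "to*" and "to(.*)".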
+func (filters Args) WalkValues(field string, op func(value string) error) error { + if _, ok := filters.fields[field]; !ok { return nil } - for v := range args.fields[field] { + for v := range filters.fields[field] { if err := op(v); err != nil { return err } diff --git a/vendor/github.com/docker/docker/api/types/filters/parse_test.go b/vendor/github.com/docker/docker/api/types/filters/parse_test.go new file mode 100644 index 0000000000..b2ed27b9ce --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse_test.go @@ -0,0 +1,417 @@ +package filters + +import ( + "fmt" + "testing" +) + +func TestParseArgs(t *testing.T) { + // equivalent of `docker ps -f 'created=today' -f 'image.name=ubuntu*' -f 'image.name=*untu'` + flagArgs := []string{ + "created=today", + "image.name=ubuntu*", + "image.name=*untu", + } + var ( + args = NewArgs() + err error + ) + for i := range flagArgs { + args, err = ParseFlag(flagArgs[i], args) + if err != nil { + t.Errorf("failed to parse %s: %s", flagArgs[i], err) + } + } + if len(args.Get("created")) != 1 { + t.Errorf("failed to set this arg") + } + if len(args.Get("image.name")) != 2 { + t.Errorf("the args should have collapsed") + } +} + +func TestParseArgsEdgeCase(t *testing.T) { + var filters Args + args, err := ParseFlag("", filters) + if err != nil { + t.Fatal(err) + } + if args.Len() != 0 { + t.Fatalf("Expected an empty Args (map), got %v", args) + } + if args, err = ParseFlag("anything", args); err == nil || err != ErrBadFormat { + t.Fatalf("Expected ErrBadFormat, got %v", err) + } +} + +func TestToParam(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + _, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } +} + +func TestToParamWithVersion(t *testing.T) { + fields := map[string]map[string]bool{ + "created": {"today": true}, + "image.name": {"ubuntu*": true, "*untu": true}, + } + a := Args{fields: fields} + + str1, err := ToParamWithVersion("1.21", a) + if err != nil { + t.Errorf("failed to marshal the filters with version < 1.22: %s", err) + } + str2, err := ToParamWithVersion("1.22", a) + if err != nil { + t.Errorf("failed to marshal the filters with version >= 1.22: %s", err) + } + if str1 != `{"created":["today"],"image.name":["*untu","ubuntu*"]}` && + str1 != `{"created":["today"],"image.name":["ubuntu*","*untu"]}` { + t.Errorf("incorrectly marshaled the filters: %s", str1) + } + if str2 != `{"created":{"today":true},"image.name":{"*untu":true,"ubuntu*":true}}` && + str2 != `{"created":{"today":true},"image.name":{"ubuntu*":true,"*untu":true}}` { + t.Errorf("incorrectly marshaled the filters: %s", str2) + } +} + +func TestFromParam(t *testing.T) { + invalids := []string{ + "anything", + "['a','list']", + "{'key': 'value'}", + `{"key": "value"}`, + } + valid := map[*Args][]string{ + &Args{fields: map[string]map[string]bool{"key": {"value": true}}}: { + `{"key": ["value"]}`, + `{"key": {"value": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key": {"value1": true, "value2": true}}}: { + `{"key": ["value1", "value2"]}`, + `{"key": {"value1": true, "value2": true}}`, + }, + &Args{fields: map[string]map[string]bool{"key1": {"value1": true}, "key2": {"value2": true}}}: { + `{"key1": ["value1"], "key2": ["value2"]}`, + `{"key1": {"value1": true}, "key2": {"value2": true}}`, + }, + } + + for _, invalid := range invalids { + if _, err := FromParam(invalid); err == nil { + 
t.Fatalf("Expected an error with %v, got nothing", invalid) + } + } + + for expectedArgs, matchers := range valid { + for _, json := range matchers { + args, err := FromParam(json) + if err != nil { + t.Fatal(err) + } + if args.Len() != expectedArgs.Len() { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + for key, expectedValues := range expectedArgs.fields { + values := args.Get(key) + + if len(values) != len(expectedValues) { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + + for _, v := range values { + if !expectedValues[v] { + t.Fatalf("Expected %v, go %v", expectedArgs, args) + } + } + } + } + } +} + +func TestEmpty(t *testing.T) { + a := Args{} + v, err := ToParam(a) + if err != nil { + t.Errorf("failed to marshal the filters: %s", err) + } + v1, err := FromParam(v) + if err != nil { + t.Errorf("%s", err) + } + if a.Len() != v1.Len() { + t.Errorf("these should both be empty sets") + } +} + +func TestArgsMatchKVListEmptySources(t *testing.T) { + args := NewArgs() + if !args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected true for (%v,created), got true", args) + } + + args = Args{map[string]map[string]bool{"created": {"today": true}}} + if args.MatchKVList("created", map[string]string{}) { + t.Fatalf("Expected false for (%v,created), got true", args) + } +} + +func TestArgsMatchKVList(t *testing.T) { + // Not empty sources + sources := map[string]string{ + "key1": "value1", + "key2": "value2", + "key3": "value3", + } + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value1": true}}, + }: "labels", + } + + for args, field := range matches { + if args.MatchKVList(field, sources) != true { + t.Fatalf("Expected true for %v on %v, got false", sources, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key4": true}}, + }: "labels", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}, + "labels": map[string]bool{"key1=value3": true}}, + }: "labels", + } + + for args, field := range differs { + if args.MatchKVList(field, sources) != false { + t.Fatalf("Expected false for %v on %v, got true", sources, args) + } + } +} + +func TestArgsMatch(t *testing.T) { + source := "today" + + matches := map[*Args]string{ + &Args{}: "field", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today": true}}, + }: "today", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to*": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"to(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tod": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"anyting": true, "to*": true}}, + }: "created", + } + + for args, field := range matches { + if args.Match(field, source) != true { + t.Fatalf("Expected true for %v on %v, got false", source, args) + } + } + + differs := map[*Args]string{ + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tomorrow": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": 
map[string]bool{"to(day": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom(.*)": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"tom": true}}, + }: "created", + &Args{map[string]map[string]bool{ + "created": map[string]bool{"today1": true}, + "labels": map[string]bool{"today": true}}, + }: "created", + } + + for args, field := range differs { + if args.Match(field, source) != false { + t.Fatalf("Expected false for %v on %v, got true", source, args) + } + } +} + +func TestAdd(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + v := f.fields["status"] + if len(v) != 1 || !v["running"] { + t.Fatalf("Expected to include a running status, got %v", v) + } + + f.Add("status", "paused") + if len(v) != 2 || !v["paused"] { + t.Fatalf("Expected to include a paused status, got %v", v) + } +} + +func TestDel(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Del("status", "running") + v := f.fields["status"] + if v["running"] { + t.Fatalf("Expected to not include a running status filter, got true") + } +} + +func TestLen(t *testing.T) { + f := NewArgs() + if f.Len() != 0 { + t.Fatalf("Expected to not include any field") + } + f.Add("status", "running") + if f.Len() != 1 { + t.Fatalf("Expected to include one field") + } +} + +func TestExactMatch(t *testing.T) { + f := NewArgs() + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + f.Add("status", "pause*") + + if !f.ExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.ExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } +} + +func TestOnlyOneExactMatch(t *testing.T) { + f := NewArgs() + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` when there are no filters, got false") + } + + f.Add("status", "running") + + if !f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to match `running` with one of the filters, got false") + } + + if f.UniqueExactMatch("status", "paused") { + t.Fatalf("Expected to not match `paused` with one of the filters, got true") + } + + f.Add("status", "pause") + if f.UniqueExactMatch("status", "running") { + t.Fatalf("Expected to not match only `running` with two filters, got true") + } +} + +func TestInclude(t *testing.T) { + f := NewArgs() + if f.Include("status") { + t.Fatalf("Expected to not include a status key, got true") + } + f.Add("status", "running") + if !f.Include("status") { + t.Fatalf("Expected to include a status key, got false") + } +} + +func TestValidate(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + + valid := map[string]bool{ + "status": true, + "dangling": true, + } + + if err := f.Validate(valid); err != nil { + t.Fatal(err) + } + + f.Add("bogus", "running") + if err := f.Validate(valid); err == nil { + t.Fatalf("Expected to return an error, got nil") + } +} + +func TestWalkValues(t *testing.T) { + f := NewArgs() + f.Add("status", "running") + f.Add("status", "paused") + + f.WalkValues("status", func(value string) error { + if value != "running" && value != "paused" { + t.Fatalf("Unexpected value %s", value) + } + return nil + }) + + err := f.WalkValues("status", func(value string) error { + return fmt.Errorf("return") + }) + if err == nil { + t.Fatalf("Expected to get an error, got nil") + } + + 
err = f.WalkValues("foo", func(value string) error { + return fmt.Errorf("return") + }) + if err != nil { + t.Fatalf("Expected to not iterate when the field doesn't exist, got %v", err) + } +} + +func TestFuzzyMatch(t *testing.T) { + f := NewArgs() + f.Add("container", "foo") + + cases := map[string]bool{ + "foo": true, + "foobar": true, + "barfoo": false, + "bar": false, + } + for source, match := range cases { + got := f.FuzzyMatch("container", source) + if got != match { + t.Fatalf("Expected %v, got %v: %s", match, got, source) + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go deleted file mode 100644 index 4d9bf1c62c..0000000000 --- a/vendor/github.com/docker/docker/api/types/graph_driver_data.go +++ /dev/null @@ -1,17 +0,0 @@ -package types - -// This file was generated by the swagger tool. -// Editing this file might prove futile when you re-run the swagger generate command - -// GraphDriverData Information about a container's graph driver. -// swagger:model GraphDriverData -type GraphDriverData struct { - - // data - // Required: true - Data map[string]string `json:"Data"` - - // name - // Required: true - Name string `json:"Name"` -} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go deleted file mode 100644 index 0dd30c729a..0000000000 --- a/vendor/github.com/docker/docker/api/types/image/image_history.go +++ /dev/null @@ -1,37 +0,0 @@ -package image - -// ---------------------------------------------------------------------------- -// DO NOT EDIT THIS FILE -// This file was generated by `swagger generate operation` -// -// See hack/generate-swagger-api.sh -// ---------------------------------------------------------------------------- - -// HistoryResponseItem history response item -// swagger:model HistoryResponseItem -type HistoryResponseItem struct { - - // comment - // Required: true - Comment string `json:"Comment"` - - // created - // Required: true - Created int64 `json:"Created"` - - // created by - // Required: true - CreatedBy string `json:"CreatedBy"` - - // Id - // Required: true - ID string `json:"Id"` - - // size - // Required: true - Size int64 `json:"Size"` - - // tags - // Required: true - Tags []string `json:"Tags"` -} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go deleted file mode 100644 index b9a65a0d8e..0000000000 --- a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// This file was generated by the swagger tool. 
-// Editing this file might prove futile when you re-run the swagger generate command - -// ImageDeleteResponseItem image delete response item -// swagger:model ImageDeleteResponseItem -type ImageDeleteResponseItem struct { - - // The image ID of an image that was deleted - Deleted string `json:"Deleted,omitempty"` - - // The image ID of an image that was untagged - Untagged string `json:"Untagged,omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index b7d133cd84..31f2365b8e 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -15,8 +15,6 @@ const ( TypeVolume Type = "volume" // TypeTmpfs is the type for mounting tmpfs TypeTmpfs Type = "tmpfs" - // TypeNamedPipe is the type for mounting Windows named pipes - TypeNamedPipe Type = "npipe" ) // Mount represents a mount (volume). @@ -25,10 +23,9 @@ type Mount struct { // Source specifies the name of the mount. Depending on mount type, this // may be a volume name or a host path, or even ignored. // Source is not supported for tmpfs (must be an empty value) - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - Consistency Consistency `json:",omitempty"` + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` VolumeOptions *VolumeOptions `json:",omitempty"` @@ -63,20 +60,6 @@ var Propagations = []Propagation{ PropagationSlave, } -// Consistency represents the consistency requirements of a mount. -type Consistency string - -const ( - // ConsistencyFull guarantees bind mount-like consistency - ConsistencyFull Consistency = "consistent" - // ConsistencyCached mounts can cache read data and FS structure - ConsistencyCached Consistency = "cached" - // ConsistencyDelegated mounts can cache read and written data and structure - ConsistencyDelegated Consistency = "delegated" - // ConsistencyDefault provides "consistent" behavior unless overridden - ConsistencyDefault Consistency = "default" -) - // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { Propagation Propagation `json:",omitempty"` @@ -100,7 +83,7 @@ type TmpfsOptions struct { // Size sets the size of the tmpfs, in bytes. // // This will be converted to an operating system specific value - // depending on the host. For example, on linux, it will be converted to + // depending on the host. For example, on linux, it will be convered to // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with // docker, uses a straight byte value. // diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 7c7dbacc85..832b3edb9f 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -28,15 +28,7 @@ type EndpointIPAMConfig struct { LinkLocalIPs []string `json:",omitempty"` } -// Copy makes a copy of the endpoint ipam config -func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { - cfgCopy := *cfg - cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) - cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) 
- return &cfgCopy -} - -// PeerInfo represents one peer of an overlay network +// PeerInfo represents one peer of a overlay network type PeerInfo struct { Name string IP string @@ -58,42 +50,6 @@ type EndpointSettings struct { GlobalIPv6Address string GlobalIPv6PrefixLen int MacAddress string - DriverOpts map[string]string -} - -// Task carries the information about one backend task -type Task struct { - Name string - EndpointID string - EndpointIP string - Info map[string]string -} - -// ServiceInfo represents service parameters with the list of service's tasks -type ServiceInfo struct { - VIP string - Ports []string - LocalLBIndex int - Tasks []Task -} - -// Copy makes a deep copy of `EndpointSettings` -func (es *EndpointSettings) Copy() *EndpointSettings { - epCopy := *es - if es.IPAMConfig != nil { - epCopy.IPAMConfig = es.IPAMConfig.Copy() - } - - if es.Links != nil { - links := make([]string, 0, len(es.Links)) - epCopy.Links = append(links, es.Links...) - } - - if es.Aliases != nil { - aliases := make([]string, 0, len(es.Aliases)) - epCopy.Aliases = append(aliases, es.Aliases...) - } - return &epCopy } // NetworkingConfig represents the container's networking configuration for each of its interfaces @@ -101,8 +57,3 @@ func (es *EndpointSettings) Copy() *EndpointSettings { type NetworkingConfig struct { EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network } - -// ConfigReference specifies the source which provides a network's configuration -type ConfigReference struct { - Network string -} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go index cab333e01a..6cc7a23b02 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -11,7 +11,7 @@ type Plugin struct { // Required: true Config PluginConfig `json:"Config"` - // True if the plugin is running. False if the plugin is not running, only installed. + // True when the plugin is running. False when the plugin is not running, only installed. 
// Required: true Enabled bool `json:"Enabled"` @@ -42,9 +42,6 @@ type PluginConfig struct { // Required: true Description string `json:"Description"` - // Docker Version used to create the plugin - DockerVersion string `json:"DockerVersion,omitempty"` - // documentation // Required: true Documentation string `json:"Documentation"` @@ -61,10 +58,6 @@ type PluginConfig struct { // Required: true Interface PluginConfigInterface `json:"Interface"` - // ipc host - // Required: true - IpcHost bool `json:"IpcHost"` - // linux // Required: true Linux PluginConfigLinux `json:"Linux"` @@ -77,10 +70,6 @@ type PluginConfig struct { // Required: true Network PluginConfigNetwork `json:"Network"` - // pid host - // Required: true - PidHost bool `json:"PidHost"` - // propagated mount // Required: true PropagatedMount string `json:"PropagatedMount"` diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go index 18f743fcde..d6f7553119 100644 --- a/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -3,12 +3,19 @@ package types import ( "encoding/json" "fmt" - "sort" ) // PluginsListResponse contains the response for the Engine API type PluginsListResponse []*Plugin +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + // UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { versionIndex := len(p) @@ -55,17 +62,3 @@ type PluginPrivilege struct { // PluginPrivileges is a list of PluginPrivilege type PluginPrivileges []PluginPrivilege - -func (s PluginPrivileges) Len() int { - return len(s) -} - -func (s PluginPrivileges) Less(i, j int) bool { - return s[i].Name < s[j].Name -} - -func (s PluginPrivileges) Swap(i, j int) { - sort.Strings(s[i].Value) - sort.Strings(s[j].Value) - s[i], s[j] = s[j], s[i] -} diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference.go b/vendor/github.com/docker/docker/api/types/reference/image_reference.go new file mode 100644 index 0000000000..be9cf8ebed --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/reference/image_reference.go @@ -0,0 +1,34 @@ +package reference + +import ( + distreference "github.com/docker/distribution/reference" +) + +// Parse parses the given references and returns the repository and +// tag (if present) from it. If there is an error during parsing, it will +// return an error. +func Parse(ref string) (string, string, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return "", "", err + } + + tag := GetTagFromNamedRef(distributionRef) + return distributionRef.Name(), tag, nil +} + +// GetTagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api makes the distinction between repository +// and tags. 
+func GetTagFromNamedRef(ref distreference.Named) string { + var tag string + switch x := ref.(type) { + case distreference.Digested: + tag = x.Digest().String() + case distreference.NamedTagged: + tag = x.Tag() + default: + tag = "latest" + } + return tag +} diff --git a/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go b/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go new file mode 100644 index 0000000000..61fb676b6c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/reference/image_reference_test.go @@ -0,0 +1,72 @@ +package reference + +import ( + "testing" +) + +func TestParse(t *testing.T) { + testCases := []struct { + ref string + expectedName string + expectedTag string + expectedError bool + }{ + { + ref: "", + expectedName: "", + expectedTag: "", + expectedError: true, + }, + { + ref: "repository", + expectedName: "repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "repository:tag", + expectedName: "repository", + expectedTag: "tag", + expectedError: false, + }, + { + ref: "test.com/repository", + expectedName: "test.com/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/test/repository", + expectedName: "test.com:5000/test/repository", + expectedTag: "latest", + expectedError: false, + }, + { + ref: "test.com:5000/repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + { + ref: "test.com:5000/repo:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedName: "test.com:5000/repo", + expectedTag: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", + expectedError: false, + }, + } + + for _, c := range testCases { + name, tag, err := Parse(c.ref) + if err != nil && c.expectedError { + continue + } else if err != nil { + t.Fatalf("error with %s: %s", c.ref, err.Error()) + } + if name != c.expectedName { + t.Fatalf("expected name %s, got %s", c.expectedName, name) + } + if tag != c.expectedTag { + t.Fatalf("expected tag %s, got %s", c.expectedTag, tag) + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go index 42cac4430a..5e37d19bd4 100644 --- a/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -4,7 +4,7 @@ package registry // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/generate-swagger-api.sh +// See hack/swagger-gen.sh // ---------------------------------------------------------------------------- // AuthenticateOKBody authenticate o k body diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go index b98a943a13..28fafab901 100644 --- a/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -3,17 +3,13 @@ package registry import ( "encoding/json" "net" - - "github.com/opencontainers/image-spec/specs-go/v1" ) // ServiceConfig stores daemon registry services configuration. 
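The new reference package defers actual parsing to github.com/docker/distribution/reference; the Parse test table above pins down the observable contract: a digest wins over a tag, and a missing tag defaults to "latest". A stdlib-only approximation of that split, skipping all validation (splitRef is a hypothetical stand-in, not the vendored API):

package main

import (
	"fmt"
	"strings"
)

func splitRef(ref string) (name, tag string) {
	if i := strings.Index(ref, "@"); i >= 0 { // digest takes precedence over tag
		name, tag = ref[:i], ref[i+1:]
		if j := strings.LastIndex(name, ":"); j > strings.LastIndex(name, "/") {
			name = name[:j] // drop ":tag" when a digest is present
		}
		return name, tag
	}
	if j := strings.LastIndex(ref, ":"); j > strings.LastIndex(ref, "/") {
		return ref[:j], ref[j+1:] // "repo:tag"
	}
	return ref, "latest" // no tag given
}

func main() {
	fmt.Println(splitRef("test.com:5000/repo:tag@sha256:ffff")) // test.com:5000/repo sha256:ffff
	fmt.Println(splitRef("test.com:5000/test/repository"))      // test.com:5000/test/repository latest
}

The ":" vs "/" position comparison is what keeps a registry port ("test.com:5000") from being mistaken for a tag separator.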
type ServiceConfig struct { - AllowNondistributableArtifactsCIDRs []*NetIPNet - AllowNondistributableArtifactsHostnames []string - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string } // NetIPNet is the net.IPNet type, which can be marshalled and @@ -106,14 +102,3 @@ type SearchResults struct { // Results is a slice containing the actual results for the search Results []SearchResult `json:"results"` } - -// DistributionInspect describes the result obtained from contacting the -// registry to retrieve image metadata -type DistributionInspect struct { - // Descriptor contains information about the manifest, including - // the content addressable digest - Descriptor v1.Descriptor - // Platforms contains the list of platforms supported by the image, - // obtained by parsing the manifest - Platforms []v1.Platform -} diff --git a/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/docker/docker/api/types/seccomp.go index 7d62c9a43f..4f02ef36b8 100644 --- a/vendor/github.com/docker/docker/api/types/seccomp.go +++ b/vendor/github.com/docker/docker/api/types/seccomp.go @@ -10,7 +10,7 @@ type Seccomp struct { Syscalls []*Syscall `json:"syscalls"` } -// Architecture is used to represent a specific architecture +// Architecture is used to represent an specific architecture // and its sub-architectures type Architecture struct { Arch Arch `json:"architecture"` diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go index 7ca76a5b63..9bf1928b8c 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -47,9 +47,6 @@ type CPUStats struct { // System Usage. Linux only. SystemUsage uint64 `json:"system_cpu_usage,omitempty"` - // Online CPUs. Linux only. - OnlineCPUs uint32 `json:"online_cpus,omitempty"` - // Throttling Data. Linux only. ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go new file mode 100644 index 0000000000..1163b3652c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/strslice/strslice_test.go @@ -0,0 +1,86 @@ +package strslice + +import ( + "encoding/json" + "reflect" + "testing" +) + +func TestStrSliceMarshalJSON(t *testing.T) { + for _, testcase := range []struct { + input StrSlice + expected string + }{ + // MADNESS(stevvooe): No clue why nil would be "" but empty would be + // "null". Had to make a change here that may affect compatibility. 
+ {input: nil, expected: "null"}, + {StrSlice{}, "[]"}, + {StrSlice{"/bin/sh", "-c", "echo"}, `["/bin/sh","-c","echo"]`}, + } { + data, err := json.Marshal(testcase.input) + if err != nil { + t.Fatal(err) + } + if string(data) != testcase.expected { + t.Fatalf("%#v: expected %v, got %v", testcase.input, testcase.expected, string(data)) + } + } +} + +func TestStrSliceUnmarshalJSON(t *testing.T) { + parts := map[string][]string{ + "": {"default", "values"}, + "[]": {}, + `["/bin/sh","-c","echo"]`: {"/bin/sh", "-c", "echo"}, + } + for json, expectedParts := range parts { + strs := StrSlice{"default", "values"} + if err := strs.UnmarshalJSON([]byte(json)); err != nil { + t.Fatal(err) + } + + actualParts := []string(strs) + if !reflect.DeepEqual(actualParts, expectedParts) { + t.Fatalf("%#v: expected %v, got %v", json, expectedParts, actualParts) + } + + } +} + +func TestStrSliceUnmarshalString(t *testing.T) { + var e StrSlice + echo, err := json.Marshal("echo") + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} + +func TestStrSliceUnmarshalSlice(t *testing.T) { + var e StrSlice + echo, err := json.Marshal([]string{"echo"}) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(echo, &e); err != nil { + t.Fatal(err) + } + + if len(e) != 1 { + t.Fatalf("expected 1 element after unmarshal: %q", e) + } + + if e[0] != "echo" { + t.Fatalf("expected `echo`, got: %q", e[0]) + } +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go index 2834cf2022..64a648bad1 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -17,24 +17,11 @@ type Meta struct { // Annotations represents how to describe an object. type Annotations struct { Name string `json:",omitempty"` - Labels map[string]string `json:"Labels"` + Labels map[string]string `json:",omitempty"` } -// Driver represents a driver (network, logging, secrets backend). +// Driver represents a driver (network, logging). type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } - -// TLSInfo represents the TLS information about what CA certificate is trusted, -// and who the issuer for a TLS certificate is -type TLSInfo struct { - // TrustRoot is the trusted CA root certificate in PEM format - TrustRoot string `json:",omitempty"` - - // CertIssuer is the raw subject bytes of the issuer - CertIssuerSubject []byte `json:",omitempty"` - - // CertIssuerPublicKey is the raw public key bytes of the issuer - CertIssuerPublicKey []byte `json:",omitempty"` -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go deleted file mode 100644 index 0fb021ce92..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/config.go +++ /dev/null @@ -1,31 +0,0 @@ -package swarm - -import "os" - -// Config represents a config. 
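The "MADNESS" comment in TestStrSliceMarshalJSON above describes stock encoding/json behavior: a nil slice marshals to null while an empty slice marshals to [], exactly as the test table expects. Two lines confirm it:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var nilSlice []string
	a, _ := json.Marshal(nilSlice)    // nil slice -> null
	b, _ := json.Marshal([]string{})  // empty slice -> []
	fmt.Println(string(a), string(b)) // null []
}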
-type Config struct { - ID string - Meta - Spec ConfigSpec -} - -// ConfigSpec represents a config specification from a config in swarm -type ConfigSpec struct { - Annotations - Data []byte `json:",omitempty"` -} - -// ConfigReferenceFileTarget is a file target in a config reference -type ConfigReferenceFileTarget struct { - Name string - UID string - GID string - Mode os.FileMode -} - -// ConfigReference is a reference to a config in swarm -type ConfigReference struct { - File *ConfigReferenceFileTarget - ConfigID string - ConfigName string -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 734236c4b0..4ab476ccc3 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -21,28 +21,6 @@ type DNSConfig struct { Options []string `json:",omitempty"` } -// SELinuxContext contains the SELinux labels of the container. -type SELinuxContext struct { - Disable bool - - User string - Role string - Type string - Level string -} - -// CredentialSpec for managed service account (Windows only) -type CredentialSpec struct { - File string - Registry string -} - -// Privileges defines the security options for the container. -type Privileges struct { - CredentialSpec *CredentialSpec - SELinuxContext *SELinuxContext -} - // ContainerSpec represents the spec of a container. type ContainerSpec struct { Image string `json:",omitempty"` @@ -54,20 +32,15 @@ type ContainerSpec struct { Dir string `json:",omitempty"` User string `json:",omitempty"` Groups []string `json:",omitempty"` - Privileges *Privileges `json:",omitempty"` - StopSignal string `json:",omitempty"` TTY bool `json:",omitempty"` OpenStdin bool `json:",omitempty"` - ReadOnly bool `json:",omitempty"` Mounts []mount.Mount `json:",omitempty"` StopGracePeriod *time.Duration `json:",omitempty"` Healthcheck *container.HealthConfig `json:",omitempty"` // The format of extra hosts on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] - Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` - Isolation container.Isolation `json:",omitempty"` + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go index 97c484e14c..5a5e11bdba 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -1,9 +1,5 @@ package swarm -import ( - "github.com/docker/docker/api/types/network" -) - // Endpoint represents an endpoint. type Endpoint struct { Spec EndpointSpec `json:",omitempty"` @@ -82,21 +78,17 @@ type Network struct { // NetworkSpec represents the spec of a network. 
type NetworkSpec struct { Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - Attachable bool `json:",omitempty"` - Ingress bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` - ConfigFrom *network.ConfigReference `json:",omitempty"` - Scope string `json:",omitempty"` + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` } // NetworkAttachmentConfig represents the configuration of a network attachment. type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` - DriverOpts map[string]string `json:",omitempty"` + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` } // NetworkAttachment represents a network attachment. diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 28c6851e9c..379e17a779 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -52,7 +52,6 @@ type NodeDescription struct { Platform Platform `json:",omitempty"` Resources Resources `json:",omitempty"` Engine EngineDescription `json:",omitempty"` - TLSInfo TLSInfo `json:",omitempty"` } // Platform represents the platform (Arch/OS). diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go deleted file mode 100644 index c4c731dc82..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ /dev/null @@ -1,19 +0,0 @@ -package swarm - -// RuntimeType is the type of runtime used for the TaskSpec -type RuntimeType string - -// RuntimeURL is the proto type url -type RuntimeURL string - -const ( - // RuntimeContainer is the container based runtime - RuntimeContainer RuntimeType = "container" - // RuntimePlugin is the plugin based runtime - RuntimePlugin RuntimeType = "plugin" - - // RuntimeURLContainer is the proto url for the container type - RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" - // RuntimeURLPlugin is the proto url for the plugin type - RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" -) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go deleted file mode 100644 index 47ae234ef3..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go +++ /dev/null @@ -1,3 +0,0 @@ -//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto - -package runtime diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go deleted file mode 100644 index 1fdc9b0436..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go +++ /dev/null @@ -1,712 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: plugin.proto -// DO NOT EDIT! - -/* - Package runtime is a generated protocol buffer package. 
- - It is generated from these files: - plugin.proto - - It has these top-level messages: - PluginSpec - PluginPrivilege -*/ -package runtime - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -type PluginSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` - Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` - Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` -} - -func (m *PluginSpec) Reset() { *m = PluginSpec{} } -func (m *PluginSpec) String() string { return proto.CompactTextString(m) } -func (*PluginSpec) ProtoMessage() {} -func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } - -func (m *PluginSpec) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginSpec) GetRemote() string { - if m != nil { - return m.Remote - } - return "" -} - -func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { - if m != nil { - return m.Privileges - } - return nil -} - -func (m *PluginSpec) GetDisabled() bool { - if m != nil { - return m.Disabled - } - return false -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. 
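The generated Marshal/Size/Unmarshal plumbing in this deleted plugin.pb.go is plain protobuf varint framing: seven payload bits per byte, with the high bit set as a continuation flag. The standard library implements the same scheme, which makes the hand-rolled encodeVarintPlugin/sovPlugin below easy to sanity-check:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300) // same 7-bits-per-byte scheme as encodeVarintPlugin
	fmt.Printf("% x\n", buf[:n])     // ac 02

	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(v) // 300
}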
-type PluginPrivilege struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` -} - -func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } -func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } -func (*PluginPrivilege) ProtoMessage() {} -func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } - -func (m *PluginPrivilege) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *PluginPrivilege) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *PluginPrivilege) GetValue() []string { - if m != nil { - return m.Value - } - return nil -} - -func init() { - proto.RegisterType((*PluginSpec)(nil), "PluginSpec") - proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") -} -func (m *PluginSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Remote) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) - i += copy(dAtA[i:], m.Remote) - } - if len(m.Privileges) > 0 { - for _, msg := range m.Privileges { - dAtA[i] = 0x1a - i++ - i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Disabled { - dAtA[i] = 0x20 - i++ - if m.Disabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if len(m.Description) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - 
dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PluginSpec) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Remote) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Privileges) > 0 { - for _, e := range m.Privileges { - l = e.Size() - n += 1 + l + sovPlugin(uint64(l)) - } - } - if m.Disabled { - n += 2 - } - return n -} - -func (m *PluginPrivilege) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovPlugin(uint64(l)) - } - if len(m.Value) > 0 { - for _, s := range m.Value { - l = len(s) - n += 1 + l + sovPlugin(uint64(l)) - } - } - return n -} - -func sovPlugin(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozPlugin(x uint64) (n int) { - return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PluginSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Remote = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Privileges = append(m.Privileges, &PluginPrivilege{}) - if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Disabled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowPlugin - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthPlugin - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipPlugin(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthPlugin - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipPlugin(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthPlugin - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowPlugin - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipPlugin(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } - -var fileDescriptorPlugin = []byte{ - // 196 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, - 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, - 0x04, 0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, - 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, - 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, - 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, - 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, - 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, - 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, - 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, - 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, - 0x0c, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto 
b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto deleted file mode 100644 index 06eb7ba650..0000000000 --- a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -// PluginSpec defines the base payload which clients can specify for creating -// a service with the plugin runtime. -message PluginSpec { - string name = 1; - string remote = 2; - repeated PluginPrivilege privileges = 3; - bool disabled = 4; -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -message PluginPrivilege { - string name = 1; - string description = 2; - repeated string value = 3; -} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go index f9b1e92669..fdb2388888 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -12,8 +12,7 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` - Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + Data []byte `json:",omitempty"` } // SecretReferenceFileTarget is a file target in a secret reference diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go index fa31a7ec86..2cf2642c1f 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -6,10 +6,10 @@ import "time" type Service struct { ID string Meta - Spec ServiceSpec `json:",omitempty"` - PreviousSpec *ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus *UpdateStatus `json:",omitempty"` + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus UpdateStatus `json:",omitempty"` } // ServiceSpec represents the spec of a service. @@ -18,10 +18,9 @@ type ServiceSpec struct { // TaskTemplate defines how the service should construct new tasks when // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - RollbackConfig *UpdateConfig `json:",omitempty"` + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` // Networks field in ServiceSpec is deprecated. The // same field in TaskSpec should be used instead. @@ -46,19 +45,13 @@ const ( UpdateStatePaused UpdateState = "paused" // UpdateStateCompleted is the completed state. UpdateStateCompleted UpdateState = "completed" - // UpdateStateRollbackStarted is the state with a rollback in progress. - UpdateStateRollbackStarted UpdateState = "rollback_started" - // UpdateStateRollbackPaused is the state with a rollback in progress. - UpdateStateRollbackPaused UpdateState = "rollback_paused" - // UpdateStateRollbackCompleted is the state with a rollback in progress. - UpdateStateRollbackCompleted UpdateState = "rollback_completed" ) // UpdateStatus reports the status of a service update. 
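All of the length prefixes in the deleted generated code above go through encodeVarintPlugin and sovPlugin, which are plain base-128 varints. A self-contained sketch of the same encoding, using only the standard library:

```go
// A self-contained sketch of the base-128 varint encoding used by
// encodeVarintPlugin/sovPlugin in the deleted generated code: seven bits of
// payload per byte, with the high bit set on every byte except the last.
package main

import "fmt"

// putUvarint appends v to buf as a protobuf-style varint and returns buf.
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// uvarintLen mirrors sovPlugin: the number of bytes putUvarint will emit.
func uvarintLen(v uint64) int {
	n := 1
	for v >= 1<<7 {
		n++
		v >>= 7
	}
	return n
}

func main() {
	for _, v := range []uint64{0, 127, 128, 300} {
		enc := putUvarint(nil, v)
		fmt.Printf("%d -> %x (len %d)\n", v, enc, uvarintLen(v))
	}
}
```

The standard library's encoding/binary.PutUvarint implements the same scheme, which is why hand-rolled copies like these can be pruned along with the rest of the generated file.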
type UpdateStatus struct { State UpdateState `json:",omitempty"` - StartedAt *time.Time `json:",omitempty"` - CompletedAt *time.Time `json:",omitempty"` + StartedAt time.Time `json:",omitempty"` + CompletedAt time.Time `json:",omitempty"` Message string `json:",omitempty"` } @@ -75,13 +68,6 @@ const ( UpdateFailureActionPause = "pause" // UpdateFailureActionContinue CONTINUE UpdateFailureActionContinue = "continue" - // UpdateFailureActionRollback ROLLBACK - UpdateFailureActionRollback = "rollback" - - // UpdateOrderStopFirst STOP_FIRST - UpdateOrderStopFirst = "stop-first" - // UpdateOrderStartFirst START_FIRST - UpdateOrderStartFirst = "start-first" ) // UpdateConfig represents the update configuration. @@ -116,9 +102,4 @@ type UpdateConfig struct { // If the failure action is PAUSE, no more tasks will be updated until // another update is started. MaxFailureRatio float32 - - // Order indicates the order of operations when rolling out an updated - // task. Either the old task is shut down before the new task is - // started, or the new task is started before the old task is shut down. - Order string } diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index b65fa86dac..0b42219696 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -2,14 +2,12 @@ package swarm import "time" -// ClusterInfo represents info about the cluster for outputting in "info" +// ClusterInfo represents info about the cluster for outputing in "info" // it contains the same information as "Swarm", but without the JoinTokens type ClusterInfo struct { ID string Meta - Spec Spec - TLSInfo TLSInfo - RootRotationInProgress bool + Spec Spec } // Swarm represents a swarm. @@ -109,16 +107,6 @@ type CAConfig struct { // ExternalCAs is a list of CAs to which a manager node will make // certificate signing requests for node certificates. ExternalCAs []*ExternalCA `json:",omitempty"` - - // SigningCACert and SigningCAKey specify the desired signing root CA and - // root CA key for the swarm. When inspecting the cluster, the key will - // be redacted. - SigningCACert string `json:",omitempty"` - SigningCAKey string `json:",omitempty"` - - // If this value changes, and there is no specified signing cert and key, - // then the swarm is forced to generate a new root certificate ane key. - ForceRotate uint64 `json:",omitempty"` } // ExternalCAProtocol represents type of external CA. @@ -138,31 +126,23 @@ type ExternalCA struct { // Options is a set of additional key/value pairs whose interpretation // depends on the specified CA type. Options map[string]string `json:",omitempty"` - - // CACert specifies which root CA is used by this external CA. This certificate must - // be in PEM format. - CACert string } // InitRequest is the request used to init a swarm. type InitRequest struct { ListenAddr string AdvertiseAddr string - DataPathAddr string ForceNewCluster bool Spec Spec AutoLockManagers bool - Availability NodeAvailability } // JoinRequest is the request used to join a swarm. type JoinRequest struct { ListenAddr string AdvertiseAddr string - DataPathAddr string RemoteAddrs []string JoinToken string // accept by secret - Availability NodeAvailability } // UnlockRequest is the request used to unlock a swarm. 
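A behavioral note on the UpdateStatus hunk above: `omitempty` never treats a struct as empty, so moving StartedAt/CompletedAt from *time.Time back to time.Time means a zero timestamp is serialized instead of omitted. A standalone demonstration:

```go
// Demonstrates that a json ",omitempty" tag omits a nil *time.Time but not a
// zero time.Time: encoding/json never considers a struct value "empty".
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type withPointer struct {
	StartedAt *time.Time `json:",omitempty"`
}

type withValue struct {
	StartedAt time.Time `json:",omitempty"`
}

func main() {
	p, _ := json.Marshal(withPointer{}) // nil pointer is omitted
	v, _ := json.Marshal(withValue{})   // zero struct is still emitted
	fmt.Println(string(p))              // {}
	fmt.Println(string(v))              // {"StartedAt":"0001-01-01T00:00:00Z"}
}
```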
@@ -197,10 +177,10 @@ type Info struct { Error string RemoteManagers []Peer - Nodes int `json:",omitempty"` - Managers int `json:",omitempty"` + Nodes int + Managers int - Cluster *ClusterInfo `json:",omitempty"` + Cluster ClusterInfo } // Peer represents a peer. diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index ff11b07e74..ace12cc89f 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -1,10 +1,6 @@ package swarm -import ( - "time" - - "github.com/docker/docker/api/types/swarm/runtime" -) +import "time" // TaskState represents the state of a task. type TaskState string @@ -51,16 +47,11 @@ type Task struct { Status TaskStatus `json:",omitempty"` DesiredState TaskState `json:",omitempty"` NetworksAttachments []NetworkAttachment `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` } // TaskSpec represents the spec of a task. type TaskSpec struct { - // ContainerSpec and PluginSpec are mutually exclusive. - // PluginSpec will only be used when the `Runtime` field is set to `plugin` - ContainerSpec *ContainerSpec `json:",omitempty"` - PluginSpec *runtime.PluginSpec `json:",omitempty"` - + ContainerSpec ContainerSpec `json:",omitempty"` Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` Placement *Placement `json:",omitempty"` @@ -74,40 +65,12 @@ type TaskSpec struct { // ForceUpdate is a counter that triggers an update even if no relevant // parameters have been changed. ForceUpdate uint64 - - Runtime RuntimeType `json:",omitempty"` } // Resources represents resources (CPU/Memory). type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` - GenericResources []GenericResource `json:",omitempty"` -} - -// GenericResource represents a "user defined" resource which can -// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) -type GenericResource struct { - NamedResourceSpec *NamedGenericResource `json:",omitempty"` - DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` -} - -// NamedGenericResource represents a "user defined" resource which is defined -// as a string. -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) -type NamedGenericResource struct { - Kind string `json:",omitempty"` - Value string `json:",omitempty"` -} - -// DiscreteGenericResource represents a "user defined" resource which is defined -// as an integer -// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) -// Value is used to count the resource (SSD=5, HDD=3, ...) -type DiscreteGenericResource struct { - Kind string `json:",omitempty"` - Value int64 `json:",omitempty"` + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` } // ResourceRequirements represents resources requirements. @@ -118,26 +81,7 @@ type ResourceRequirements struct { // Placement represents orchestration parameters. type Placement struct { - Constraints []string `json:",omitempty"` - Preferences []PlacementPreference `json:",omitempty"` - - // Platforms stores all the platforms that the image can run on. - // This field is used in the platform filter for scheduling. If empty, - // then the platform filter is off, meaning there are no scheduling restrictions. 
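With this hunk applied, TaskSpec is back to its pre-plugin shape: ContainerSpec is a plain value and Placement carries only Constraints. A sketch of constructing a task spec against these older vendored types; the image and constraint strings are invented:

```go
// Builds a swarm.TaskSpec against the older vendored API restored by this
// patch: ContainerSpec is a value (not a pointer) and Placement only carries
// Constraints. The image name and constraint are invented values.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	spec := swarm.TaskSpec{
		ContainerSpec: swarm.ContainerSpec{
			Image: "alpine:3.6",
		},
		Placement: &swarm.Placement{
			Constraints: []string{"node.role == worker"},
		},
		ForceUpdate: 0, // bump to force a rolling update without spec changes
	}
	fmt.Printf("%+v\n", spec)
}
```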
- Platforms []Platform `json:",omitempty"` -} - -// PlacementPreference provides a way to make the scheduler aware of factors -// such as topology. -type PlacementPreference struct { - Spread *SpreadOver -} - -// SpreadOver is a scheduling preference that instructs the scheduler to spread -// tasks evenly over groups of nodes identified by labels. -type SpreadOver struct { - // label descriptor, such as engine.labels.az - SpreadDescriptor string + Constraints []string `json:",omitempty"` } // RestartPolicy represents the restart policy. diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go new file mode 100644 index 0000000000..869c08f863 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/duration_convert_test.go @@ -0,0 +1,26 @@ +package time + +import ( + "testing" + "time" +) + +func TestDurationToSecondsString(t *testing.T) { + cases := []struct { + in time.Duration + expected string + }{ + {0 * time.Second, "0"}, + {1 * time.Second, "1"}, + {1 * time.Minute, "60"}, + {24 * time.Hour, "86400"}, + } + + for _, c := range cases { + s := DurationToSecondsString(c.in) + if s != c.expected { + t.Errorf("wrong value for input `%v`: expected `%s`, got `%s`", c.in, c.expected, s) + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go index ed9c1168b7..d3695ba723 100644 --- a/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -29,8 +29,10 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } var format string + var parseInLocation bool + // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) if strings.Contains(value, ".") { if parseInLocation { @@ -82,7 +84,7 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp otherwise assume unixtimestamp + // if there is a `-` then its an RFC3339 like timestamp otherwise assume unixtimestamp if strings.Contains(value, "-") { return "", err // was probably an RFC3339 like timestamp but the parser failed with an error } @@ -116,7 +118,7 @@ func ParseTimestamps(value string, def int64) (int64, int64, error) { if err != nil { return s, n, err } - // should already be in nanoseconds but just in case convert n to nanoseconds + // should already be in nanoseconds but just in case convert n to nanoseonds n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) return s, n, nil } diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp_test.go b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go new file mode 100644 index 0000000000..a1651309d7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/time/timestamp_test.go @@ -0,0 +1,93 @@ +package time + +import ( + "fmt" + "testing" + "time" +) + +func TestGetTimestamp(t *testing.T) { + now := time.Now().In(time.UTC) + cases := []struct { + in, expected string + expectedErr bool + }{ + // Partial RFC3339 strings get parsed with second precision + {"2006-01-02T15:04:05.999999999+07:00", "1136189045.999999999", false}, + 
{"2006-01-02T15:04:05.999999999Z", "1136214245.999999999", false}, + {"2006-01-02T15:04:05.999999999", "1136214245.999999999", false}, + {"2006-01-02T15:04:05Z", "1136214245.000000000", false}, + {"2006-01-02T15:04:05", "1136214245.000000000", false}, + {"2006-01-02T15:04:0Z", "", true}, + {"2006-01-02T15:04:0", "", true}, + {"2006-01-02T15:04Z", "1136214240.000000000", false}, + {"2006-01-02T15:04+00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04-00:00", "1136214240.000000000", false}, + {"2006-01-02T15:04", "1136214240.000000000", false}, + {"2006-01-02T15:0Z", "", true}, + {"2006-01-02T15:0", "", true}, + {"2006-01-02T15Z", "1136214000.000000000", false}, + {"2006-01-02T15+00:00", "1136214000.000000000", false}, + {"2006-01-02T15-00:00", "1136214000.000000000", false}, + {"2006-01-02T15", "1136214000.000000000", false}, + {"2006-01-02T1Z", "1136163600.000000000", false}, + {"2006-01-02T1", "1136163600.000000000", false}, + {"2006-01-02TZ", "", true}, + {"2006-01-02T", "", true}, + {"2006-01-02+00:00", "1136160000.000000000", false}, + {"2006-01-02-00:00", "1136160000.000000000", false}, + {"2006-01-02-00:01", "1136160060.000000000", false}, + {"2006-01-02Z", "1136160000.000000000", false}, + {"2006-01-02", "1136160000.000000000", false}, + {"2015-05-13T20:39:09Z", "1431549549.000000000", false}, + + // unix timestamps returned as is + {"1136073600", "1136073600", false}, + {"1136073600.000000001", "1136073600.000000001", false}, + // Durations + {"1m", fmt.Sprintf("%d", now.Add(-1*time.Minute).Unix()), false}, + {"1.5h", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + {"1h30m", fmt.Sprintf("%d", now.Add(-90*time.Minute).Unix()), false}, + + // String fallback + {"invalid", "invalid", false}, + } + + for _, c := range cases { + o, err := GetTimestamp(c.in, now) + if o != c.expected || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong value for '%s'. 
expected:'%s' got:'%s' with error: `%s`", c.in, c.expected, o, err) + t.Fail() + } + } +} + +func TestParseTimestamps(t *testing.T) { + cases := []struct { + in string + def, expectedS, expectedN int64 + expectedErr bool + }{ + // unix timestamps + {"1136073600", 0, 1136073600, 0, false}, + {"1136073600.000000001", 0, 1136073600, 1, false}, + {"1136073600.0000000010", 0, 1136073600, 1, false}, + {"1136073600.00000001", 0, 1136073600, 10, false}, + {"foo.bar", 0, 0, 0, true}, + {"1136073600.bar", 0, 1136073600, 0, true}, + {"", -1, -1, 0, false}, + } + + for _, c := range cases { + s, n, err := ParseTimestamps(c.in, c.def) + if s != c.expectedS || + n != c.expectedN || + (err == nil && c.expectedErr) || + (err != nil && !c.expectedErr) { + t.Errorf("wrong values for input `%s` with default `%d` expected:'%d'seconds and `%d`nanosecond got:'%d'seconds and `%d`nanoseconds with error: `%s`", c.in, c.def, c.expectedS, c.expectedN, s, n, err) + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index f7ac772971..a82c3e88ef 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -17,6 +17,38 @@ import ( "github.com/docker/go-connections/nat" ) +// ContainerChange contains response of Engine API: +// GET "/containers/{name:.*}/changes" +type ContainerChange struct { + Kind int + Path string +} + +// ImageHistory contains response of Engine API: +// GET "/images/{name:.*}/history" +type ImageHistory struct { + ID string `json:"Id"` + Created int64 + CreatedBy string + Tags []string + Size int64 + Comment string +} + +// ImageDelete contains response of Engine API: +// DELETE "/images/{name:.*}" +type ImageDelete struct { + Untagged string `json:",omitempty"` + Deleted string `json:",omitempty"` +} + +// GraphDriverData returns Image's graph driver config info +// when calling inspect command +type GraphDriverData struct { + Name string + Data map[string]string +} + // RootFS returns Image's RootFS description including the layer IDs. type RootFS struct { Type string @@ -45,12 +77,6 @@ type ImageInspect struct { VirtualSize int64 GraphDriver GraphDriverData RootFS RootFS - Metadata ImageMetadata -} - -// ImageMetadata contains engine-local data about the image -type ImageMetadata struct { - LastTagTime time.Time `json:",omitempty"` } // Container contains response of Engine API: @@ -99,11 +125,17 @@ type ContainerStats struct { OSType string `json:"ostype"` } +// ContainerProcessList contains response of Engine API: +// GET "/containers/{name:.*}/top" +type ContainerProcessList struct { + Processes [][]string + Titles []string +} + // Ping contains response of Engine API: // GET "/_ping" type Ping struct { APIVersion string - OSType string Experimental bool } @@ -122,11 +154,11 @@ type Version struct { BuildTime string `json:",omitempty"` } -// Commit holds the Git-commit (SHA1) that a binary was built from, as reported -// in the version-string of external tools, such as containerd, or runC. +// Commit records a external tool actual commit id version along the +// one expect by dockerd as set at build time type Commit struct { - ID string // ID is the actual commit ID of external tool. - Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
+ ID string + Expected string } // Info contains response of Engine API: @@ -168,7 +200,6 @@ type Info struct { RegistryConfig *registry.ServiceConfig NCPU int MemTotal int64 - GenericResources []swarm.GenericResource DockerRootDir string HTTPProxy string `json:"HttpProxy"` HTTPSProxy string `json:"HttpsProxy"` @@ -245,8 +276,6 @@ type PluginsInfo struct { Network []string // List of Authorization plugins registered Authorization []string - // List of Log plugins registered - Log []string } // ExecStartCheck is a temp struct used by execStart @@ -284,7 +313,7 @@ type Health struct { // ContainerState stores container's running state // it's part of ContainerJSONBase and will return by "inspect" command type ContainerState struct { - Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Status string Running bool Paused bool Restarting bool @@ -327,7 +356,6 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string - Platform string MountLabel string ProcessLabel string AppArmorProfile string @@ -401,23 +429,19 @@ type MountPoint struct { // NetworkResource is the body of the "get network" http response message type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Created time.Time // Created is the time the network created - Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. - Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. - ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. - ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created - Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network - Services map[string]network.ServiceInfo `json:",omitempty"` + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. 
`bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network } // EndpointResource contains network resources allocated and used for a container in a network @@ -431,23 +455,12 @@ type EndpointResource struct { // NetworkCreate is the expected body of the "create network" http request message type NetworkCreate struct { - // Check for networks with duplicate names. - // Network is primarily keyed based on a random ID and not on the name. - // Network name is strictly a user-friendly alias to the network - // which is uniquely identified using ID. - // And there is no guaranteed way to check for duplicates. - // Option CheckDuplicate is there to provide a best effort checking of any networks - // which has the same name but it is not guaranteed to catch all name collisions. CheckDuplicate bool Driver string - Scope string EnableIPv6 bool IPAM *network.IPAM Internal bool Attachable bool - Ingress bool - ConfigOnly bool - ConfigFrom *network.ConfigReference Options map[string]string Labels map[string]string } @@ -476,12 +489,6 @@ type NetworkDisconnect struct { Force bool } -// NetworkInspectOptions holds parameters to inspect network -type NetworkInspectOptions struct { - Scope string - Verbose bool -} - // Checkpoint represents the details of a checkpoint type Checkpoint struct { Name string // Name is the name of the checkpoint @@ -496,11 +503,10 @@ type Runtime struct { // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume - BuilderSize int64 + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume } // ContainersPruneReport contains the response for Engine API: @@ -520,13 +526,7 @@ type VolumesPruneReport struct { // ImagesPruneReport contains the response for Engine API: // POST "/images/prune" type ImagesPruneReport struct { - ImagesDeleted []ImageDeleteResponseItem - SpaceReclaimed uint64 -} - -// BuildCachePruneReport contains the response for Engine API: -// POST "/build/prune" -type BuildCachePruneReport struct { + ImagesDeleted []ImageDelete SpaceReclaimed uint64 } @@ -547,29 +547,3 @@ type SecretCreateResponse struct { type SecretListOptions struct { Filters filters.Args } - -// ConfigCreateResponse contains the information returned to a client -// on the creation of a new config. -type ConfigCreateResponse struct { - // ID is the id of the created config. - ID string -} - -// ConfigListOptions holds parameters to list configs -type ConfigListOptions struct { - Filters filters.Args -} - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. 
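The trimmed NetworkCreate body above is what the client posts to `/networks/create`. A hedged sketch of driving it through this vendored Go client — `Client.NetworkCreate` is assumed to exist with this signature in the vendored client (it is not part of this hunk), and the network name and subnet are invented:

```go
// Creates a bridge network using the trimmed NetworkCreate body above.
// Assumes Client.NetworkCreate from the vendored client; the network name
// and subnet are invented values.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}

	resp, err := cli.NetworkCreate(context.Background(), "example-net", types.NetworkCreate{
		CheckDuplicate: true,     // best-effort duplicate-name check
		Driver:         "bridge", // same default the daemon would pick
		IPAM: &network.IPAM{
			Config: []network.IPAMConfig{{Subnet: "10.10.0.0/24"}},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created network", resp.ID)
}
```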
-type PushResult struct { - Tag string - Digest string - Size int -} - -// BuildResult contains the image id of a successful build -type BuildResult struct { - ID string -} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md index 1ef911edb0..cdac50a53c 100644 --- a/vendor/github.com/docker/docker/api/types/versions/README.md +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -1,10 +1,10 @@ -# Legacy API type versions +## Legacy API type versions This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. -## Package name conventions +### Package name conventions The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: diff --git a/vendor/github.com/docker/docker/api/types/versions/compare_test.go b/vendor/github.com/docker/docker/api/types/versions/compare_test.go new file mode 100644 index 0000000000..c2b96869f7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare_test.go @@ -0,0 +1,26 @@ +package versions + +import ( + "testing" +) + +func assertVersion(t *testing.T, a, b string, result int) { + if r := compare(a, b); r != result { + t.Fatalf("Unexpected version comparison result. Found %d, expected %d", r, result) + } +} + +func TestCompareVersion(t *testing.T) { + assertVersion(t, "1.12", "1.12", 0) + assertVersion(t, "1.0.0", "1", 0) + assertVersion(t, "1", "1.0.0", 0) + assertVersion(t, "1.05.00.0156", "1.0.221.9289", 1) + assertVersion(t, "1", "1.0.1", -1) + assertVersion(t, "1.0.1", "1", 1) + assertVersion(t, "1.0.1", "1.0.2", -1) + assertVersion(t, "1.0.2", "1.0.3", -1) + assertVersion(t, "1.0.3", "1.1", -1) + assertVersion(t, "1.1", "1.1.1", -1) + assertVersion(t, "1.1.1", "1.1.2", -1) + assertVersion(t, "1.1.2", "1.2", -1) +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go index b5ee96a500..da4f8ebd9c 100644 --- a/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -7,9 +7,6 @@ package types // swagger:model Volume type Volume struct { - // Date/Time the volume was created. - CreatedAt string `json:"CreatedAt,omitempty"` - // Name of the volume driver used by the volume. // Required: true Driver string `json:"Driver"` @@ -47,23 +44,15 @@ type Volume struct { UsageData *VolumeUsageData `json:"UsageData,omitempty"` } -// VolumeUsageData Usage details about the volume. This information is used by the -// `GET /system/df` endpoint, and omitted in other endpoints. -// +// VolumeUsageData volume usage data // swagger:model VolumeUsageData type VolumeUsageData struct { - // The number of containers referencing this volume. This field - // is set to `-1` if the reference-count is not available. - // + // The number of containers referencing this volume. // Required: true RefCount int64 `json:"RefCount"` - // Amount of disk space used by the volume (in bytes). 
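The new TestCompareVersion above pins down the comparison semantics: segment-by-segment numeric comparison, with missing segments treated as zero (so "1" equals "1.0.0", and "1.05.00.0156" beats "1.0.221.9289" on the second segment). A standalone re-implementation of those semantics for illustration, not the vendored function itself:

```go
// Re-implements the comparison semantics exercised by TestCompareVersion:
// split on ".", compare each segment numerically, treat missing segments
// as zero.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func compare(v1, v2 string) int {
	s1 := strings.Split(v1, ".")
	s2 := strings.Split(v2, ".")
	segs := len(s1)
	if len(s2) > segs {
		segs = len(s2)
	}
	for i := 0; i < segs; i++ {
		var d1, d2 int
		if i < len(s1) {
			d1, _ = strconv.Atoi(s1[i])
		}
		if i < len(s2) {
			d2, _ = strconv.Atoi(s2[i])
		}
		if d1 < d2 {
			return -1
		}
		if d1 > d2 {
			return 1
		}
	}
	return 0
}

func main() {
	fmt.Println(compare("1", "1.0.0"))                   // 0
	fmt.Println(compare("1.05.00.0156", "1.0.221.9289")) // 1: 5 > 0 in segment two
	fmt.Println(compare("1.0.1", "1.0.2"))               // -1
}
```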
This information - // is only available for volumes created with the `"local"` volume - // driver. For volumes created with other volume drivers, this field - // is set to `-1` ("not available") - // + // The disk space used by the volume (local driver only) // Required: true Size int64 `json:"Size"` } diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go index 9f70e43ca4..679c16006f 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go @@ -4,7 +4,7 @@ package volume // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/generate-swagger-api.sh +// See hack/swagger-gen.sh // ---------------------------------------------------------------------------- // VolumesCreateBody volumes create body diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go index 833dad9330..7770bcb8fc 100644 --- a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go @@ -4,7 +4,7 @@ package volume // DO NOT EDIT THIS FILE // This file was generated by `swagger generate operation` // -// See hack/generate-swagger-api.sh +// See hack/swagger-gen.sh // ---------------------------------------------------------------------------- import "github.com/docker/docker/api/types" diff --git a/vendor/github.com/docker/docker/cli/cobra.go b/vendor/github.com/docker/docker/cli/cobra.go new file mode 100644 index 0000000000..139845cb1b --- /dev/null +++ b/vendor/github.com/docker/docker/cli/cobra.go @@ -0,0 +1,139 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// SetupRootCommand sets default usage, help, and error handling for the +// root command. 
+func SetupRootCommand(rootCmd *cobra.Command) { + cobra.AddTemplateFunc("hasSubCommands", hasSubCommands) + cobra.AddTemplateFunc("hasManagementSubCommands", hasManagementSubCommands) + cobra.AddTemplateFunc("operationSubCommands", operationSubCommands) + cobra.AddTemplateFunc("managementSubCommands", managementSubCommands) + + rootCmd.SetUsageTemplate(usageTemplate) + rootCmd.SetHelpTemplate(helpTemplate) + rootCmd.SetFlagErrorFunc(FlagErrorFunc) + rootCmd.SetHelpCommand(helpCommand) + + rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") + rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") +} + +// FlagErrorFunc prints an error message which matches the format of the +// docker/docker/cli error messages +func FlagErrorFunc(cmd *cobra.Command, err error) error { + if err == nil { + return err + } + + usage := "" + if cmd.HasSubCommands() { + usage = "\n\n" + cmd.UsageString() + } + return StatusError{ + Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), + StatusCode: 125, + } +} + +var helpCommand = &cobra.Command{ + Use: "help [command]", + Short: "Help about the command", + PersistentPreRun: func(cmd *cobra.Command, args []string) {}, + PersistentPostRun: func(cmd *cobra.Command, args []string) {}, + RunE: func(c *cobra.Command, args []string) error { + cmd, args, e := c.Root().Find(args) + if cmd == nil || e != nil || len(args) > 0 { + return fmt.Errorf("unknown help topic: %v", strings.Join(args, " ")) + } + + helpFunc := cmd.HelpFunc() + helpFunc(cmd, args) + return nil + }, +} + +func hasSubCommands(cmd *cobra.Command) bool { + return len(operationSubCommands(cmd)) > 0 +} + +func hasManagementSubCommands(cmd *cobra.Command) bool { + return len(managementSubCommands(cmd)) > 0 +} + +func operationSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && !sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +func managementSubCommands(cmd *cobra.Command) []*cobra.Command { + cmds := []*cobra.Command{} + for _, sub := range cmd.Commands() { + if sub.IsAvailableCommand() && sub.HasSubCommands() { + cmds = append(cmds, sub) + } + } + return cmds +} + +var usageTemplate = `Usage: + +{{- if not .HasSubCommands}} {{.UseLine}}{{end}} +{{- if .HasSubCommands}} {{ .CommandPath}} COMMAND{{end}} + +{{ .Short | trim }} + +{{- if gt .Aliases 0}} + +Aliases: + {{.NameAndAliases}} + +{{- end}} +{{- if .HasExample}} + +Examples: +{{ .Example }} + +{{- end}} +{{- if .HasFlags}} + +Options: +{{.Flags.FlagUsages | trimRightSpace}} + +{{- end}} +{{- if hasManagementSubCommands . }} + +Management Commands: + +{{- range managementSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} + +{{- end}} +{{- if hasSubCommands .}} + +Commands: + +{{- range operationSubCommands . }} + {{rpad .Name .NamePadding }} {{.Short}} +{{- end}} +{{- end}} + +{{- if .HasSubCommands }} + +Run '{{.CommandPath}} COMMAND --help' for more information on a command. +{{- end}} +` + +var helpTemplate = ` +{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/vendor/github.com/docker/docker/cli/error.go b/vendor/github.com/docker/docker/cli/error.go new file mode 100644 index 0000000000..62f62433b8 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/error.go @@ -0,0 +1,33 @@ +package cli + +import ( + "fmt" + "strings" +) + +// Errors is a list of errors. 
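The new cli/cobra.go is self-contained; wiring it up takes one call on the root command. A minimal sketch — the tool name and `ping` subcommand are invented, and `cli.NoArgs` comes from cli/required.go, added below:

```go
// Minimal wiring of the helpers from cli/cobra.go: SetupRootCommand installs
// the docker-style usage/help templates, flag-error handling, and the help
// subcommand. The tool name and subcommand are invented.
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/cli"
	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{
		Use:   "exampletool",
		Short: "Demonstrates the docker cli command scaffolding",
	}
	rootCmd.AddCommand(&cobra.Command{
		Use:   "ping",
		Short: "Print a greeting",
		Args:  cli.NoArgs, // from cli/required.go below
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("pong")
			return nil
		},
	})

	cli.SetupRootCommand(rootCmd)

	if err := rootCmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```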
+// Useful in a loop if you don't want to return the error right away and you want to display after the loop, +// all the errors that happened during the loop. +type Errors []error + +func (errList Errors) Error() string { + if len(errList) < 1 { + return "" + } + + out := make([]string, len(errList)) + for i := range errList { + out[i] = errList[i].Error() + } + return strings.Join(out, ", ") +} + +// StatusError reports an unsuccessful exit by a command. +type StatusError struct { + Status string + StatusCode int +} + +func (e StatusError) Error() string { + return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) +} diff --git a/vendor/github.com/docker/docker/cli/required.go b/vendor/github.com/docker/docker/cli/required.go new file mode 100644 index 0000000000..8ee02c8429 --- /dev/null +++ b/vendor/github.com/docker/docker/cli/required.go @@ -0,0 +1,96 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// NoArgs validates args and returns an error if there are any args +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return nil + } + + if cmd.HasSubCommands() { + return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) + } + + return fmt.Errorf( + "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) +} + +// RequiresMinArgs returns an error if there is not at least min args +func RequiresMinArgs(min int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresMaxArgs returns an error if there is not at most max args +func RequiresMaxArgs(max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// RequiresRangeArgs returns an error if there is not at least min args and at most max args +func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) >= min && len(args) <= max { + return nil + } + return fmt.Errorf( + "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + min, + max, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} + +// ExactArgs returns an error if there is not the exact number of args +func ExactArgs(number int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) == number { + return nil + } + return fmt.Errorf( + "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", + cmd.CommandPath(), + number, + cmd.CommandPath(), + cmd.UseLine(), + cmd.Short, + ) + } +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 0000000000..059dfb3ce7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. 
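Each validator in cli/required.go above returns a cobra.PositionalArgs, so it drops straight into a command's Args field. A short sketch with an invented `greet` command:

```go
// Uses RequiresMinArgs from cli/required.go as a cobra Args validator.
// The "greet" command and its behavior are invented for illustration.
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/cli"
	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:   "greet NAME [NAME...]",
		Short: "Greet one or more people",
		Args:  cli.RequiresMinArgs(1), // bare "greet" now fails with the usage text
		RunE: func(cmd *cobra.Command, args []string) error {
			for _, name := range args {
				fmt.Println("hello,", name)
			}
			return nil
		},
	}
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```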
It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. + +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 0000000000..0effe498be --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create_test.go b/vendor/github.com/docker/docker/client/checkpoint_create_test.go new file mode 100644 index 0000000000..96e5187618 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create_test.go @@ -0,0 +1,73 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.CheckpointCreate(context.Background(), "nothing", types.CheckpointCreateOptions{ + CheckpointID: "noting", + Exit: true, + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointCreate(t *testing.T) { + expectedContainerID := "container_id" + expectedCheckpointID := "checkpoint_id" + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + createOptions := &types.CheckpointCreateOptions{} + if err := json.NewDecoder(req.Body).Decode(createOptions); err != nil { + return nil, err + } + + if createOptions.CheckpointID != expectedCheckpointID { + return nil, fmt.Errorf("expected CheckpointID to be 'checkpoint_id', got %v", createOptions.CheckpointID) + } + + if !createOptions.Exit { + return nil, fmt.Errorf("expected Exit to be true") + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := 
client.CheckpointCreate(context.Background(), expectedContainerID, types.CheckpointCreateOptions{ + CheckpointID: expectedCheckpointID, + Exit: true, + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 0000000000..e6e75588b1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete_test.go b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go new file mode 100644 index 0000000000..a78b050487 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointDeleteError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointDelete(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints/checkpoint_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.CheckpointDelete(context.Background(), "container_id", types.CheckpointDeleteOptions{ + CheckpointID: "checkpoint_id", + }) + + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 0000000000..8eb720a6b2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// CheckpointList returns the volumes configured in the docker host. 
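Together, checkpoint_create.go, checkpoint_delete.go, and checkpoint_list.go below cover `/containers/{id}/checkpoints`. A sketch of the calling side; the container name and checkpoint ID are placeholders, and the endpoints require a daemon with experimental checkpoint support:

```go
// Exercises the checkpoint client methods added in this patch. The container
// name and checkpoint ID are placeholders; a daemon with (experimental)
// checkpoint support is assumed.
package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Checkpoint the container without stopping it (Exit: false).
	err = cli.CheckpointCreate(ctx, "mycontainer", types.CheckpointCreateOptions{
		CheckpointID: "cp1",
		Exit:         false,
	})
	if err != nil {
		panic(err)
	}

	checkpoints, err := cli.CheckpointList(ctx, "mycontainer", types.CheckpointListOptions{})
	if err != nil {
		panic(err)
	}
	for _, cp := range checkpoints {
		fmt.Println("checkpoint:", cp.Name)
	}
}
```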
+func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + if err != nil { + return checkpoints, err + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + ensureReaderClosed(resp) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list_test.go b/vendor/github.com/docker/docker/client/checkpoint_list_test.go new file mode 100644 index 0000000000..6c90f61e8c --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestCheckpointListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestCheckpointList(t *testing.T) { + expectedURL := "/containers/container_id/checkpoints" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]types.Checkpoint{ + { + Name: "checkpoint", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + checkpoints, err := client.CheckpointList(context.Background(), "container_id", types.CheckpointListOptions{}) + if err != nil { + t.Fatal(err) + } + if len(checkpoints) != 1 { + t.Fatalf("expected 1 checkpoint, got %v", checkpoints) + } +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 0000000000..a9bdab6bb6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,246 @@ +/* +Package client is a Go client for the Docker Engine API. + +The "docker" command uses this package to communicate with the daemon. It can also +be used by your own Go applications to do anything the command-line interface does +– running containers, pulling images, managing swarms, etc. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/reference/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewEnvClient, or +configured manually with NewClient. 
+ +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewEnvClient() + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client + +import ( + "fmt" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" +) + +// DefaultVersion is the version of the current stable API +const DefaultVersion string = "1.25" + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool +} + +// NewEnvClient initializes a new API client based on environment variables. +// Use DOCKER_HOST to set the url to the docker server. +// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// Use DOCKER_CERT_PATH to load the tls certificates from. +// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func NewEnvClient() (*Client, error) { + var client *http.Client + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + client = &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + } + } + + host := os.Getenv("DOCKER_HOST") + if host == "" { + host = DefaultDockerHost + } + version := os.Getenv("DOCKER_API_VERSION") + if version == "" { + version = DefaultVersion + } + + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if os.Getenv("DOCKER_API_VERSION") != "" { + cli.manualOverride = true + } + return cli, nil +} + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
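+//
+// A hedged example call (illustrative; the host, version, and header values
+// here are assumptions, not upstream defaults):
+//
+//	cli, err := client.NewClient("unix:///var/run/docker.sock", "1.25", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"})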
+func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
+	proto, addr, basePath, err := ParseHost(host)
+	if err != nil {
+		return nil, err
+	}
+
+	if client != nil {
+		if _, ok := client.Transport.(*http.Transport); !ok {
+			return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport)
+		}
+	} else {
+		transport := new(http.Transport)
+		sockets.ConfigureTransport(transport, proto, addr)
+		client = &http.Client{
+			Transport: transport,
+		}
+	}
+
+	scheme := "http"
+	tlsConfig := resolveTLSConfig(client.Transport)
+	if tlsConfig != nil {
+		// TODO(stevvooe): This isn't really the right way to write clients in Go.
+		// `NewClient` should probably only take an `*http.Client` and work from there.
+		// Unfortunately, the model of having a host-ish/url-thingy as the connection
+		// string has us confusing protocol and transport layers. We continue doing
+		// this to avoid breaking existing clients but this should be addressed.
+		scheme = "https"
+	}
+
+	return &Client{
+		scheme:            scheme,
+		host:              host,
+		proto:             proto,
+		addr:              addr,
+		basePath:          basePath,
+		client:            client,
+		version:           version,
+		customHTTPHeaders: httpHeaders,
+	}, nil
+}
+
+// Close ensures that transport.Client is closed
+// especially needed while using NewClient with *http.Client = nil
+// for example
+// client.NewClient("unix:///var/run/docker.sock", "v1.18", nil, map[string]string{"User-Agent": "engine-api-cli-1.0"})
+func (cli *Client) Close() error {
+
+	if t, ok := cli.client.Transport.(*http.Transport); ok {
+		t.CloseIdleConnections()
+	}
+
+	return nil
+}
+
+// getAPIPath returns the versioned request path to call the api.
+// It appends the query parameters to the path if they are not empty.
+func (cli *Client) getAPIPath(p string, query url.Values) string {
+	var apiPath string
+	if cli.version != "" {
+		v := strings.TrimPrefix(cli.version, "v")
+		apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
+	} else {
+		apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
+	}
+
+	u := &url.URL{
+		Path: apiPath,
+	}
+	if len(query) > 0 {
+		u.RawQuery = query.Encode()
+	}
+	return u.String()
+}
+
+// ClientVersion returns the version string associated with this
+// instance of the Client. Note that this value can be changed
+// via the DOCKER_API_VERSION env var.
+func (cli *Client) ClientVersion() string {
+	return cli.version
+}
+
+// UpdateClientVersion updates the version string associated with this
+// instance of the Client.
+func (cli *Client) UpdateClientVersion(v string) {
+	if !cli.manualOverride {
+		cli.version = v
+	}
+
+}
+
+// ParseHost verifies that the given host string is valid.
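+//
+// A hedged sketch of its behavior (illustrative only): a host string such as
+// "tcp://localhost:2376/path" yields proto "tcp", addr "localhost:2376" and
+// base path "/path".
+//
+//	proto, addr, basePath, err := client.ParseHost("tcp://localhost:2376/path")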
+func ParseHost(host string) (string, string, string, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return "", "", "", err + } + addr = parsed.Host + basePath = parsed.Path + } + return proto, addr, basePath, nil +} diff --git a/vendor/github.com/docker/docker/client/client_mock_test.go b/vendor/github.com/docker/docker/client/client_mock_test.go new file mode 100644 index 0000000000..0ab935d536 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_mock_test.go @@ -0,0 +1,45 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" +) + +func newMockClient(doer func(*http.Request) (*http.Response, error)) *http.Client { + return &http.Client{ + Transport: transportFunc(doer), + } +} + +func errorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + header := http.Header{} + header.Set("Content-Type", "application/json") + + body, err := json.Marshal(&types.ErrorResponse{ + Message: message, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader(body)), + Header: header, + }, nil + } +} + +func plainTextErrorMock(statusCode int, message string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: statusCode, + Body: ioutil.NopCloser(bytes.NewReader([]byte(message))), + }, nil + } +} diff --git a/vendor/github.com/docker/docker/client/client_test.go b/vendor/github.com/docker/docker/client/client_test.go new file mode 100644 index 0000000000..ee199c2bec --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_test.go @@ -0,0 +1,283 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNewEnvClient(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping unix only test for windows") + } + cases := []struct { + envs map[string]string + expectedError string + expectedVersion string + }{ + { + envs: map[string]string{}, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "invalid/path", + }, + expectedError: "Could not load X509 key pair: open invalid/path/cert.pem: no such file or directory. 
Make sure the key is not encrypted", + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_TLS_VERIFY": "1", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_CERT_PATH": "testdata/", + "DOCKER_HOST": "https://notaunixsocket", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_HOST": "host", + }, + expectedError: "unable to parse docker host `host`", + }, + { + envs: map[string]string{ + "DOCKER_HOST": "invalid://url", + }, + expectedVersion: DefaultVersion, + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "anything", + }, + expectedVersion: "anything", + }, + { + envs: map[string]string{ + "DOCKER_API_VERSION": "1.22", + }, + expectedVersion: "1.22", + }, + } + for _, c := range cases { + recoverEnvs := setupEnvs(t, c.envs) + apiclient, err := NewEnvClient() + if c.expectedError != "" { + if err == nil { + t.Errorf("expected an error for %v", c) + } else if err.Error() != c.expectedError { + t.Errorf("expected an error %s, got %s, for %v", c.expectedError, err.Error(), c) + } + } else { + if err != nil { + t.Error(err) + } + version := apiclient.ClientVersion() + if version != c.expectedVersion { + t.Errorf("expected %s, got %s, for %v", c.expectedVersion, version, c) + } + } + + if c.envs["DOCKER_TLS_VERIFY"] != "" { + // pedantic checking that this is handled correctly + tr := apiclient.client.Transport.(*http.Transport) + if tr.TLSClientConfig == nil { + t.Errorf("no tls config found when DOCKER_TLS_VERIFY enabled") + } + + if tr.TLSClientConfig.InsecureSkipVerify { + t.Errorf("tls verification should be enabled") + } + } + + recoverEnvs(t) + } +} + +func setupEnvs(t *testing.T, envs map[string]string) func(*testing.T) { + oldEnvs := map[string]string{} + for key, value := range envs { + oldEnv := os.Getenv(key) + oldEnvs[key] = oldEnv + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + return func(t *testing.T) { + for key, value := range oldEnvs { + err := os.Setenv(key, value) + if err != nil { + t.Error(err) + } + } + } +} + +func TestGetAPIPath(t *testing.T) { + cases := []struct { + v string + p string + q url.Values + e string + }{ + {"", "/containers/json", nil, "/containers/json"}, + {"", "/containers/json", url.Values{}, "/containers/json"}, + {"", "/containers/json", url.Values{"s": []string{"c"}}, "/containers/json?s=c"}, + {"1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/containers/json", nil, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{}, "/v1.22/containers/json"}, + {"v1.22", "/containers/json", url.Values{"s": []string{"c"}}, "/v1.22/containers/json?s=c"}, + {"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"}, + } + + for _, cs := range cases { + c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil) + if err != nil { + t.Fatal(err) + } + g := c.getAPIPath(cs.p, cs.q) + if g != cs.e { + t.Fatalf("Expected %s, got %s", cs.e, g) + } + + err = c.Close() + if nil != err { + t.Fatalf("close client failed, error message: %s", err) + } + } +} + +func TestParseHost(t *testing.T) { + cases := []struct { + host string + proto string + addr string + base string + err bool + }{ + {"", "", "", "", true}, 
+ {"foobar", "", "", "", true}, + {"foo://bar", "foo", "bar", "", false}, + {"tcp://localhost:2476", "tcp", "localhost:2476", "", false}, + {"tcp://localhost:2476/path", "tcp", "localhost:2476", "/path", false}, + } + + for _, cs := range cases { + p, a, b, e := ParseHost(cs.host) + if cs.err && e == nil { + t.Fatalf("expected error, got nil") + } + if !cs.err && e != nil { + t.Fatal(e) + } + if cs.proto != p { + t.Fatalf("expected proto %s, got %s", cs.proto, p) + } + if cs.addr != a { + t.Fatalf("expected addr %s, got %s", cs.addr, a) + } + if cs.base != b { + t.Fatalf("expected base %s, got %s", cs.base, b) + } + } +} + +func TestUpdateClientVersion(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + splitQuery := strings.Split(req.URL.Path, "/") + queryVersion := splitQuery[1] + b, err := json.Marshal(types.Version{ + APIVersion: queryVersion, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + cases := []struct { + v string + }{ + {"1.20"}, + {"v1.21"}, + {"1.22"}, + {"v1.22"}, + } + + for _, cs := range cases { + client.UpdateClientVersion(cs.v) + r, err := client.ServerVersion(context.Background()) + if err != nil { + t.Fatal(err) + } + if strings.TrimPrefix(r.APIVersion, "v") != strings.TrimPrefix(cs.v, "v") { + t.Fatalf("Expected %s, got %s", cs.v, r.APIVersion) + } + } +} + +func TestNewEnvClientSetsDefaultVersion(t *testing.T) { + // Unset environment variables + envVarKeys := []string{ + "DOCKER_HOST", + "DOCKER_API_VERSION", + "DOCKER_TLS_VERIFY", + "DOCKER_CERT_PATH", + } + envVarValues := make(map[string]string) + for _, key := range envVarKeys { + envVarValues[key] = os.Getenv(key) + os.Setenv(key, "") + } + + client, err := NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != DefaultVersion { + t.Fatalf("Expected %s, got %s", DefaultVersion, client.version) + } + + expected := "1.22" + os.Setenv("DOCKER_API_VERSION", expected) + client, err = NewEnvClient() + if err != nil { + t.Fatal(err) + } + if client.version != expected { + t.Fatalf("Expected %s, got %s", expected, client.version) + } + + // Restore environment variables + for _, key := range envVarKeys { + os.Setenv(key, envVarValues[key]) + } +} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 0000000000..89de892c85 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,6 @@ +// +build linux freebsd solaris openbsd darwin + +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 0000000000..07c0c7a774 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,4 @@ +package client + +// DefaultDockerHost defines os specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go new file mode 100644 index 0000000000..eea4682158 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -0,0 +1,37 @@ +package client + +import ( + 
"net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedConnection with the hijacked connection +// and the a reader to get output. It's up to the called to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 0000000000..c766d62e40 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,53 @@ +package client + +import ( + "encoding/json" + "errors" + "net/url" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" + "golang.org/x/net/context" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + distributionRef, err := distreference.ParseNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + + tag = reference.GetTagFromNamedRef(distributionRef) + repository = distributionRef.Name() + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if options.Pause != true { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_commit_test.go b/vendor/github.com/docker/docker/client/container_commit_test.go new file mode 100644 index 0000000000..a844675368 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerCommitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerCommit(context.Background(), "nothing", 
types.ContainerCommitOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerCommit(t *testing.T) {
+	expectedURL := "/commit"
+	expectedContainerID := "container_id"
+	specifiedReference := "repository_name:tag"
+	expectedRepositoryName := "repository_name"
+	expectedTag := "tag"
+	expectedComment := "comment"
+	expectedAuthor := "author"
+	expectedChanges := []string{"change1", "change2"}
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			query := req.URL.Query()
+			containerID := query.Get("container")
+			if containerID != expectedContainerID {
+				return nil, fmt.Errorf("container id not set in URL query properly. Expected '%s', got '%s'", expectedContainerID, containerID)
+			}
+			repo := query.Get("repo")
+			if repo != expectedRepositoryName {
+				return nil, fmt.Errorf("container repo not set in URL query properly. Expected '%s', got '%s'", expectedRepositoryName, repo)
+			}
+			tag := query.Get("tag")
+			if tag != expectedTag {
+				return nil, fmt.Errorf("container tag not set in URL query properly. Expected '%s', got '%s'", expectedTag, tag)
+			}
+			comment := query.Get("comment")
+			if comment != expectedComment {
+				return nil, fmt.Errorf("container comment not set in URL query properly. Expected '%s', got '%s'", expectedComment, comment)
+			}
+			author := query.Get("author")
+			if author != expectedAuthor {
+				return nil, fmt.Errorf("container author not set in URL query properly. Expected '%s', got '%s'", expectedAuthor, author)
+			}
+			pause := query.Get("pause")
+			if pause != "0" {
+				return nil, fmt.Errorf("container pause not set in URL query properly. Expected '0', got '%v'", pause)
+			}
+			changes := query["changes"]
+			if len(changes) != len(expectedChanges) {
+				return nil, fmt.Errorf("expected container changes size to be '%d', got %d", len(expectedChanges), len(changes))
+			}
+			b, err := json.Marshal(types.IDResponse{
+				ID: "new_container_id",
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(b)),
+			}, nil
+		}),
+	}
+
+	r, err := client.ContainerCommit(context.Background(), expectedContainerID, types.ContainerCommitOptions{
+		Reference: specifiedReference,
+		Comment:   expectedComment,
+		Author:    expectedAuthor,
+		Changes:   expectedChanges,
+		Pause:     false,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if r.ID != "new_container_id" {
+		t.Fatalf("expected `new_container_id`, got %s", r.ID)
+	}
+}
diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go
new file mode 100644
index 0000000000..8380eeabc9
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_copy.go
@@ -0,0 +1,97 @@
+package client
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerStatPath returns Stat information about a path inside the container filesystem.
+func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) {
+	query := url.Values{}
+	query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API.
+ + urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + response, err := cli.head(ctx, urlStr, query, nil) + if err != nil { + return types.ContainerPathStat{}, err + } + defer ensureReaderClosed(response) + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + if err != nil { + return err + } + defer ensureReaderClosed(response) + + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := fmt.Sprintf("/containers/%s/archive", container) + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, err + } + + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy_test.go b/vendor/github.com/docker/docker/client/container_copy_test.go new file mode 100644 index 0000000000..706a20c818 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy_test.go @@ -0,0 +1,244 @@ +package client + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStatPathError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestContainerStatPathNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, err := client.ContainerStatPath(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestContainerStatPath(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "HEAD" { + return nil, fmt.Errorf("expected HEAD method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly") + } + content, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(content) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + stat, err := client.ContainerStatPath(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } +} + +func TestCopyToContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := 
client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyToContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + err := client.CopyToContainer(context.Background(), "container_id", "path/to/file", bytes.NewReader([]byte("")), types.CopyToContainerOptions{}) + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyToContainer(t *testing.T) { + expectedURL := "/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "PUT" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + noOverwriteDirNonDir := query.Get("noOverwriteDirNonDir") + if noOverwriteDirNonDir != "true" { + return nil, fmt.Errorf("noOverwriteDirNonDir not set in URL query properly, expected true, got %s", noOverwriteDirNonDir) + } + + content, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + if err := req.Body.Close(); err != nil { + return nil, err + } + if string(content) != "content" { + return nil, fmt.Errorf("expected content to be 'content', got %s", string(content)) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.CopyToContainer(context.Background(), "container_id", expectedPath, bytes.NewReader([]byte("content")), types.CopyToContainerOptions{ + AllowOverwriteDirWithFile: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestCopyFromContainerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestCopyFromContainerNotStatusOKError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNoContent, "No content")), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil || err.Error() != "unexpected status code from daemon: 204" { + t.Fatalf("expected an unexpected status code error, got %v", err) + } +} + +func TestCopyFromContainerNoHeaderError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + _, _, err := client.CopyFromContainer(context.Background(), "container_id", "path/to/file") + if err == nil { + t.Fatalf("expected an error, got nothing") + } +} + +func TestCopyFromContainer(t *testing.T) { + expectedURL := 
"/containers/container_id/archive" + expectedPath := "path/to/file" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected PUT method, got %s", req.Method) + } + query := req.URL.Query() + path := query.Get("path") + if path != expectedPath { + return nil, fmt.Errorf("path not set in URL query properly, expected '%s', got %s", expectedPath, path) + } + + headercontent, err := json.Marshal(types.ContainerPathStat{ + Name: "name", + Mode: 0700, + }) + if err != nil { + return nil, err + } + base64PathStat := base64.StdEncoding.EncodeToString(headercontent) + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("content"))), + Header: http.Header{ + "X-Docker-Container-Path-Stat": []string{base64PathStat}, + }, + }, nil + }), + } + r, stat, err := client.CopyFromContainer(context.Background(), "container_id", expectedPath) + if err != nil { + t.Fatal(err) + } + if stat.Name != "name" { + t.Fatalf("expected container path stat name to be 'name', got '%s'", stat.Name) + } + if stat.Mode != 0700 { + t.Fatalf("expected container path stat mode to be 0700, got '%v'", stat.Mode) + } + content, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if err := r.Close(); err != nil { + t.Fatal(err) + } + if string(content) != "content" { + t.Fatalf("expected content to be 'content', got %s", string(content)) + } +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 0000000000..9f627aafa6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,50 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. 
+func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) {
+	var response container.ContainerCreateCreatedBody
+
+	if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
+		return response, err
+	}
+
+	query := url.Values{}
+	if containerName != "" {
+		query.Set("name", containerName)
+	}
+
+	body := configWrapper{
+		Config:           config,
+		HostConfig:       hostConfig,
+		NetworkingConfig: networkingConfig,
+	}
+
+	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
+	if err != nil {
+		if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
+			return response, imageNotFoundError{config.Image}
+		}
+		return response, err
+	}
+
+	err = json.NewDecoder(serverResp.body).Decode(&response)
+	ensureReaderClosed(serverResp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_create_test.go b/vendor/github.com/docker/docker/client/container_create_test.go
new file mode 100644
index 0000000000..15dbd5ea01
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_create_test.go
@@ -0,0 +1,76 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types/container"
+	"golang.org/x/net/context"
+)
+
+func TestContainerCreateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerCreate(context.Background(), nil, nil, nil, "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error while testing StatusInternalServerError, got %v", err)
+	}
+
+	// a 404 doesn't automagically mean an unknown image
+	client = &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
+	}
+	_, err = client.ContainerCreate(context.Background(), nil, nil, nil, "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error while testing StatusNotFound, got %v", err)
+	}
+}
+
+func TestContainerCreateImageNotFound(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "No such image")),
+	}
+	_, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown")
+	if err == nil || !IsErrImageNotFound(err) {
+		t.Fatalf("expected an imageNotFound error, got %v", err)
+	}
+}
+
+func TestContainerCreateWithName(t *testing.T) {
+	expectedURL := "/containers/create"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			name := req.URL.Query().Get("name")
+			if name != "container_name" {
+				return nil, fmt.Errorf("container name not set in URL query properly.
Expected `container_name`, got %s", name) + } + b, err := json.Marshal(container.ContainerCreateCreatedBody{ + ID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerCreate(context.Background(), nil, nil, nil, "container_name") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 0000000000..1e3e554fc5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerDiff shows differences in a container filesystem since it was started. +func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { + var changes []types.ContainerChange + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + ensureReaderClosed(serverResp) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_diff_test.go b/vendor/github.com/docker/docker/client/container_diff_test.go new file mode 100644 index 0000000000..1ce1117684 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff_test.go @@ -0,0 +1,61 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerDiffError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerDiff(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + +} + +func TestContainerDiff(t *testing.T) { + expectedURL := "/containers/container_id/changes" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal([]types.ContainerChange{ + { + Kind: 0, + Path: "/path/1", + }, + { + Kind: 1, + Path: "/path/2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + changes, err := client.ContainerDiff(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if len(changes) != 2 { + t.Fatalf("expected an array of 2 changes, got %v", changes) + } +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 0000000000..0665c54fbd --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,54 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. 
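+//
+// A hedged sketch of the create/start flow (illustrative; the container ID,
+// command, and detach settings are assumptions):
+//
+//	idResp, err := cli.ContainerExecCreate(ctx, "container_id", types.ExecConfig{Cmd: []string{"ls", "/"}})
+//	if err == nil {
+//		err = cli.ContainerExecStart(ctx, idResp.ID, types.ExecStartCheck{Detach: true})
+//	}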
+func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) {
+	var response types.IDResponse
+
+	if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil {
+		return response, err
+	}
+
+	resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil)
+	if err != nil {
+		return response, err
+	}
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
+
+// ContainerExecStart starts an exec process already created in the docker host.
+func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error {
+	resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil)
+	ensureReaderClosed(resp)
+	return err
+}
+
+// ContainerExecAttach attaches a connection to an exec process in the server.
+// It returns a types.HijackedResponse with the hijacked connection
+// and a reader to get output. It's up to the caller to close
+// the hijacked connection by calling types.HijackedResponse.Close.
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+	headers := map[string][]string{"Content-Type": {"application/json"}}
+	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
+}
+
+// ContainerExecInspect returns information about a specific exec process on the docker host.
+func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) {
+	var response types.ContainerExecInspect
+	resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil)
+	if err != nil {
+		return response, err
+	}
+
+	err = json.NewDecoder(resp.body).Decode(&response)
+	ensureReaderClosed(resp)
+	return response, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_exec_test.go b/vendor/github.com/docker/docker/client/container_exec_test.go
new file mode 100644
index 0000000000..0e296a50ad
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_exec_test.go
@@ -0,0 +1,157 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+func TestContainerExecCreateError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	_, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerExecCreate(t *testing.T) {
+	expectedURL := "/containers/container_id/exec"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			// FIXME validate the content is the given ExecConfig ?
+ if err := req.ParseForm(); err != nil { + return nil, err + } + execConfig := &types.ExecConfig{} + if err := json.NewDecoder(req.Body).Decode(execConfig); err != nil { + return nil, err + } + if execConfig.User != "user" { + return nil, fmt.Errorf("expected an execConfig with User == 'user', got %v", execConfig) + } + b, err := json.Marshal(types.IDResponse{ + ID: "exec_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ContainerExecCreate(context.Background(), "container_id", types.ExecConfig{ + User: "user", + }) + if err != nil { + t.Fatal(err) + } + if r.ID != "exec_id" { + t.Fatalf("expected `exec_id`, got %s", r.ID) + } +} + +func TestContainerExecStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecStart(context.Background(), "nothing", types.ExecStartCheck{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecStart(t *testing.T) { + expectedURL := "/exec/exec_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if err := req.ParseForm(); err != nil { + return nil, err + } + execStartCheck := &types.ExecStartCheck{} + if err := json.NewDecoder(req.Body).Decode(execStartCheck); err != nil { + return nil, err + } + if execStartCheck.Tty || !execStartCheck.Detach { + return nil, fmt.Errorf("expected execStartCheck{Detach:true,Tty:false}, got %v", execStartCheck) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerExecStart(context.Background(), "exec_id", types.ExecStartCheck{ + Detach: true, + Tty: false, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExecInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecInspect(t *testing.T) { + expectedURL := "/exec/exec_id/json" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(types.ContainerExecInspect{ + ExecID: "exec_id", + ContainerID: "container_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + inspect, err := client.ContainerExecInspect(context.Background(), "exec_id") + if err != nil { + t.Fatal(err) + } + if inspect.ExecID != "exec_id" { + t.Fatalf("expected ExecID to be `exec_id`, got %s", inspect.ExecID) + } + if inspect.ContainerID != "container_id" { + t.Fatalf("expected ContainerID `container_id`, got %s", inspect.ContainerID) + } +} diff --git a/vendor/github.com/docker/docker/client/container_export.go 
b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 0000000000..52194f3d34 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,20 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream. +func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_export_test.go b/vendor/github.com/docker/docker/client/container_export_test.go new file mode 100644 index 0000000000..5849fe9252 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export_test.go @@ -0,0 +1,50 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerExportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerExport(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExport(t *testing.T) { + expectedURL := "/containers/container_id/export" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerExport(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 0000000000..17f1809747 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ContainerJSON{}, containerNotFoundError{containerID} + } + return types.ContainerJSON{}, err + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. 
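+//
+// A hedged usage sketch (illustrative; assumes an initialized *Client named
+// cli and a context ctx):
+//
+//	info, raw, err := cli.ContainerInspectWithRaw(ctx, "container_id", true)
+//	// info is the decoded types.ContainerJSON; raw holds the unparsed JSON body.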
+func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
+	query := url.Values{}
+	if getSize {
+		query.Set("size", "1")
+	}
+	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
+	if err != nil {
+		if serverResp.statusCode == http.StatusNotFound {
+			return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
+		}
+		return types.ContainerJSON{}, nil, err
+	}
+	defer ensureReaderClosed(serverResp)
+
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return types.ContainerJSON{}, nil, err
+	}
+
+	var response types.ContainerJSON
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/container_inspect_test.go b/vendor/github.com/docker/docker/client/container_inspect_test.go
new file mode 100644
index 0000000000..f1a6f4ac7d
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_inspect_test.go
@@ -0,0 +1,125 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestContainerInspectError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	_, err := client.ContainerInspect(context.Background(), "nothing")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerInspectContainerNotFound(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
+	}
+
+	_, err := client.ContainerInspect(context.Background(), "unknown")
+	if err == nil || !IsErrContainerNotFound(err) {
+		t.Fatalf("expected a containerNotFound error, got %v", err)
+	}
+}
+
+func TestContainerInspect(t *testing.T) {
+	expectedURL := "/containers/container_id/json"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			content, err := json.Marshal(types.ContainerJSON{
+				ContainerJSONBase: &types.ContainerJSONBase{
+					ID:    "container_id",
+					Image: "image",
+					Name:  "name",
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader(content)),
+			}, nil
+		}),
+	}
+
+	r, err := client.ContainerInspect(context.Background(), "container_id")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if r.ID != "container_id" {
+		t.Fatalf("expected `container_id`, got %s", r.ID)
+	}
+	if r.Image != "image" {
+		t.Fatalf("expected `image`, got %s", r.Image)
+	}
+	if r.Name != "name" {
+		t.Fatalf("expected `name`, got %s", r.Name)
+	}
+}
+
+func TestContainerInspectNode(t *testing.T) {
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			content, err := json.Marshal(types.ContainerJSON{
+				ContainerJSONBase: &types.ContainerJSONBase{
+					ID:    "container_id",
+					Image: "image",
+					Name:  "name",
+					Node: &types.ContainerNode{
+						ID:     "container_node_id",
+						Addr:   "container_node",
+						Labels: map[string]string{"foo": "bar"},
+					},
+				},
+			})
+			if err != nil {
+				return nil, err
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:
ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.ContainerInspect(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if r.ID != "container_id" { + t.Fatalf("expected `container_id`, got %s", r.ID) + } + if r.Image != "image" { + t.Fatalf("expected `image`, got %s", r.ID) + } + if r.Name != "name" { + t.Fatalf("expected `name`, got %s", r.ID) + } + if r.Node.ID != "container_node_id" { + t.Fatalf("expected `container_node_id`, got %s", r.Node.ID) + } + if r.Node.Addr != "container_node" { + t.Fatalf("expected `container_node`, got %s", r.Node.Addr) + } + foo, ok := r.Node.Labels["foo"] + if foo != "bar" || !ok { + t.Fatalf("expected `bar` for label `foo`") + } +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 0000000000..29f80c73ad --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,17 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. +func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_kill_test.go b/vendor/github.com/docker/docker/client/container_kill_test.go new file mode 100644 index 0000000000..9477b0abd2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerKillError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerKill(context.Background(), "nothing", "SIGKILL") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerKill(t *testing.T) { + expectedURL := "/containers/container_id/kill" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + signal := req.URL.Query().Get("signal") + if signal != "SIGKILL" { + return nil, fmt.Errorf("signal not set in URL query properly. Expected 'SIGKILL', got %s", signal) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerKill(context.Background(), "container_id", "SIGKILL") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 0000000000..4398912197 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,56 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainerList returns the list of containers in the docker host. 
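+//
+// Illustrative sketch (not from upstream; assumes an initialized cli and ctx):
+//
+//	containers, err := cli.ContainerList(ctx, types.ContainerListOptions{All: true})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, c := range containers {
+//		fmt.Println(c.ID, c.Image)
+//	}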
+func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + ensureReaderClosed(resp) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_list_test.go b/vendor/github.com/docker/docker/client/container_list_test.go new file mode 100644 index 0000000000..e41c6874b5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list_test.go @@ -0,0 +1,96 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestContainerListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerList(t *testing.T) { + expectedURL := "/containers/json" + expectedFilters := `{"before":{"container":true},"label":{"label1":true,"label2":true}}` + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + all := query.Get("all") + if all != "1" { + return nil, fmt.Errorf("all not set in URL query properly. Expected '1', got %s", all) + } + limit := query.Get("limit") + if limit != "0" { + return nil, fmt.Errorf("limit should have not be present in query. Expected '0', got %s", limit) + } + since := query.Get("since") + if since != "container" { + return nil, fmt.Errorf("since not set in URL query properly. Expected 'container', got %s", since) + } + before := query.Get("before") + if before != "" { + return nil, fmt.Errorf("before should have not be present in query, go %s", before) + } + size := query.Get("size") + if size != "1" { + return nil, fmt.Errorf("size not set in URL query properly. 
Expected '1', got %s", size) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("expected filters incoherent '%v' with actual filters %v", expectedFilters, filters) + } + + b, err := json.Marshal([]types.Container{ + { + ID: "container_id1", + }, + { + ID: "container_id2", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("before", "container") + containers, err := client.ContainerList(context.Background(), types.ContainerListOptions{ + Size: true, + All: true, + Since: "container", + Filters: filters, + }) + if err != nil { + t.Fatal(err) + } + if len(containers) != 2 { + t.Fatalf("expected 2 containers, got %v", containers) + } +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 0000000000..69056b6321 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_logs_test.go b/vendor/github.com/docker/docker/client/container_logs_test.go new file mode 100644 index 0000000000..99e31842c9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestContainerLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestContainerLogs(t *testing.T) { + expectedURL := "/containers/container_id/logs" + cases 
:= []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, + Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // An complete invalid date, timestamp or go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ContainerLogs(context.Background(), "container_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ContainerLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ContainerLogs(ctx, "container_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 0000000000..412067a782 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerPause pauses the main process of a given container without terminating it. 
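+//
+// Illustrative sketch (not from upstream; assumes an initialized cli and ctx;
+// pair with ContainerUnpause to resume):
+//
+//	if err := cli.ContainerPause(ctx, "my_container"); err != nil {
+//		log.Fatal(err)
+//	}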
+func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_pause_test.go b/vendor/github.com/docker/docker/client/container_pause_test.go new file mode 100644 index 0000000000..0ee2f05d7e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerPauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerPause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerPause(t *testing.T) { + expectedURL := "/containers/container_id/pause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerPause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 0000000000..b582170867 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 0000000000..3a79590ced --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerRemove kills and removes a container from the docker host. 
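+//
+// Illustrative sketch (not from upstream; assumes an initialized cli and ctx):
+//
+//	err := cli.ContainerRemove(ctx, "my_container", types.ContainerRemoveOptions{
+//		RemoveVolumes: true,
+//		Force:         true,
+//	})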
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error {
+	query := url.Values{}
+	if options.RemoveVolumes {
+		query.Set("v", "1")
+	}
+	if options.RemoveLinks {
+		query.Set("link", "1")
+	}
+
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_remove_test.go b/vendor/github.com/docker/docker/client/container_remove_test.go
new file mode 100644
index 0000000000..798c08b333
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_remove_test.go
@@ -0,0 +1,59 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+func TestContainerRemoveError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerRemove(t *testing.T) {
+	expectedURL := "/containers/container_id"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			query := req.URL.Query()
+			volume := query.Get("v")
+			if volume != "1" {
+				return nil, fmt.Errorf("v (volume) not set in URL query properly. Expected '1', got %s", volume)
+			}
+			force := query.Get("force")
+			if force != "1" {
+				return nil, fmt.Errorf("force not set in URL query properly. Expected '1', got %s", force)
+			}
+			link := query.Get("link")
+			if link != "" {
+				return nil, fmt.Errorf("link should not be present in query, got %s", link)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+
+	err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{
+		RemoveVolumes: true,
+		Force:         true,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go
new file mode 100644
index 0000000000..0e718da7c6
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_rename.go
@@ -0,0 +1,16 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+)
+
+// ContainerRename changes the name of a given container.
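+//
+// Illustrative sketch (not from upstream):
+//
+//	if err := cli.ContainerRename(ctx, "my_container", "new_name"); err != nil {
+//		log.Fatal(err)
+//	}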
+func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_rename_test.go b/vendor/github.com/docker/docker/client/container_rename_test.go new file mode 100644 index 0000000000..732ebff5f7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename_test.go @@ -0,0 +1,46 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerRenameError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerRename(context.Background(), "nothing", "newNothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerRename(t *testing.T) { + expectedURL := "/containers/container_id/rename" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + name := req.URL.Query().Get("name") + if name != "newName" { + return nil, fmt.Errorf("name not set in URL query properly. Expected 'newName', got %s", name) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerRename(context.Background(), "container_id", "newName") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 0000000000..66c3cc1940 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. 
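+//
+// Illustrative sketch (not from upstream; "exec_id" is a hypothetical exec
+// instance ID, Height/Width are the tty rows and columns):
+//
+//	err := cli.ContainerExecResize(ctx, "exec_id", types.ResizeOptions{
+//		Height: 40,
+//		Width:  120,
+//	})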
+func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize_test.go b/vendor/github.com/docker/docker/client/container_resize_test.go new file mode 100644 index 0000000000..5b2efecdce --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize_test.go @@ -0,0 +1,82 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerExecResizeError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/containers/container_id/resize")), + } + + err := client.ContainerResize(context.Background(), "container_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func TestContainerExecResize(t *testing.T) { + client := &Client{ + client: newMockClient(resizeTransport("/exec/exec_id/resize")), + } + + err := client.ContainerExecResize(context.Background(), "exec_id", types.ResizeOptions{ + Height: 500, + Width: 600, + }) + if err != nil { + t.Fatal(err) + } +} + +func resizeTransport(expectedURL string) func(req *http.Request) (*http.Response, error) { + return func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + h := query.Get("h") + if h != "500" { + return nil, fmt.Errorf("h not set in URL query properly. Expected '500', got %s", h) + } + w := query.Get("w") + if w != "600" { + return nil, fmt.Errorf("w not set in URL query properly. Expected '600', got %s", w) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + } +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 0000000000..74d7455f02 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,22 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerRestart stops and starts a container again. 
+// It makes the daemon wait for the container to be up again for
+// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error {
+	query := url.Values{}
+	if timeout != nil {
+		query.Set("t", timetypes.DurationToSecondsString(*timeout))
+	}
+	resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/container_restart_test.go b/vendor/github.com/docker/docker/client/container_restart_test.go
new file mode 100644
index 0000000000..8c3cfd6a6f
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_restart_test.go
@@ -0,0 +1,48 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+func TestContainerRestartError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+	timeout := 0 * time.Second
+	err := client.ContainerRestart(context.Background(), "nothing", &timeout)
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestContainerRestart(t *testing.T) {
+	expectedURL := "/containers/container_id/restart"
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			t := req.URL.Query().Get("t")
+			if t != "100" {
+				return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+	timeout := 100 * time.Second
+	err := client.ContainerRestart(context.Background(), "container_id", &timeout)
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go
new file mode 100644
index 0000000000..b1f08de416
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/container_start.go
@@ -0,0 +1,24 @@
+package client
+
+import (
+	"net/url"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+)
+
+// ContainerStart sends a request to the docker daemon to start a container.
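+//
+// Illustrative sketch (not from upstream; assumes an initialized cli and ctx):
+//
+//	if err := cli.ContainerStart(ctx, "my_container", types.ContainerStartOptions{}); err != nil {
+//		log.Fatal(err)
+//	}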
+func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start_test.go b/vendor/github.com/docker/docker/client/container_start_test.go new file mode 100644 index 0000000000..5826fa8bc7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestContainerStartError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerStart(context.Background(), "nothing", types.ContainerStartOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStart(t *testing.T) { + expectedURL := "/containers/container_id/start" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + // we're not expecting any payload, but if one is supplied, check it is valid. + if req.Header.Get("Content-Type") == "application/json" { + var startConfig interface{} + if err := json.NewDecoder(req.Body).Decode(&startConfig); err != nil { + return nil, fmt.Errorf("Unable to parse json: %s", err) + } + } + + checkpoint := req.URL.Query().Get("checkpoint") + if checkpoint != "checkpoint_id" { + return nil, fmt.Errorf("checkpoint not set in URL query properly. Expected 'checkpoint_id', got %s", checkpoint) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.ContainerStart(context.Background(), "container_id", types.ContainerStartOptions{CheckpointID: "checkpoint_id"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 0000000000..4758c66e32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,26 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. 
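+//
+// Illustrative sketch (not from upstream; with stream set to false a single
+// JSON sample is returned on the body):
+//
+//	stats, err := cli.ContainerStats(ctx, "my_container", false)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer stats.Body.Close()
+//	io.Copy(os.Stdout, stats.Body)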
+func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stats_test.go b/vendor/github.com/docker/docker/client/container_stats_test.go new file mode 100644 index 0000000000..7414f135c3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerStatsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerStats(context.Background(), "nothing", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStats(t *testing.T) { + expectedURL := "/containers/container_id/stats" + cases := []struct { + stream bool + expectedStream string + }{ + { + expectedStream: "0", + }, + { + stream: true, + expectedStream: "1", + }, + } + for _, c := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + + query := r.URL.Query() + stream := query.Get("stream") + if stream != c.expectedStream { + return nil, fmt.Errorf("stream not set in URL query properly. Expected '%s', got %s", c.expectedStream, stream) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + resp, err := client.ContainerStats(context.Background(), "container_id", c.stream) + if err != nil { + t.Fatal(err) + } + defer resp.Body.Close() + content, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 0000000000..b5418ae8c8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" + "golang.org/x/net/context" +) + +// ContainerStop stops a container without terminating the process. +// The process is blocked until the container stops or the timeout expires. 
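+//
+// Illustrative sketch (not from upstream; with a nil timeout the t parameter
+// is omitted and the daemon applies its own default):
+//
+//	timeout := 10 * time.Second
+//	if err := cli.ContainerStop(ctx, "my_container", &timeout); err != nil {
+//		log.Fatal(err)
+//	}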
+func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stop_test.go b/vendor/github.com/docker/docker/client/container_stop_test.go new file mode 100644 index 0000000000..c32cd691c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestContainerStopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + timeout := 0 * time.Second + err := client.ContainerStop(context.Background(), "nothing", &timeout) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerStop(t *testing.T) { + expectedURL := "/containers/container_id/stop" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + t := req.URL.Query().Get("t") + if t != "100" { + return nil, fmt.Errorf("t (timeout) not set in URL query properly. Expected '100', got %s", t) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + timeout := 100 * time.Second + err := client.ContainerStop(context.Background(), "container_id", &timeout) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 0000000000..4e7270ea22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client + +import ( + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ContainerTop shows process information from within a container. 
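+//
+// Illustrative sketch (not from upstream; nil arguments omit ps_args):
+//
+//	list, err := cli.ContainerTop(ctx, "my_container", nil)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(list.Titles)
+//	for _, proc := range list.Processes {
+//		fmt.Println(proc)
+//	}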
+func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { + var response types.ContainerProcessList + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_top_test.go b/vendor/github.com/docker/docker/client/container_top_test.go new file mode 100644 index 0000000000..7802be063e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top_test.go @@ -0,0 +1,74 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestContainerTopError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerTop(context.Background(), "nothing", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerTop(t *testing.T) { + expectedURL := "/containers/container_id/top" + expectedProcesses := [][]string{ + {"p1", "p2"}, + {"p3"}, + } + expectedTitles := []string{"title1", "title2"} + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + args := query.Get("ps_args") + if args != "arg1 arg2" { + return nil, fmt.Errorf("args not set in URL query properly. 
Expected 'arg1 arg2', got %v", args) + } + + b, err := json.Marshal(types.ContainerProcessList{ + Processes: [][]string{ + {"p1", "p2"}, + {"p3"}, + }, + Titles: []string{"title1", "title2"}, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + processList, err := client.ContainerTop(context.Background(), "container_id", []string{"arg1", "arg2"}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(expectedProcesses, processList.Processes) { + t.Fatalf("Processes: expected %v, got %v", expectedProcesses, processList.Processes) + } + if !reflect.DeepEqual(expectedTitles, processList.Titles) { + t.Fatalf("Titles: expected %v, got %v", expectedTitles, processList.Titles) + } +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 0000000000..5c76211256 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause_test.go b/vendor/github.com/docker/docker/client/container_unpause_test.go new file mode 100644 index 0000000000..2c42727191 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause_test.go @@ -0,0 +1,41 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestContainerUnpauseError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + err := client.ContainerUnpause(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUnpause(t *testing.T) { + expectedURL := "/containers/container_id/unpause" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ContainerUnpause(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 0000000000..5082f22dfa --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + if 
err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_update_test.go b/vendor/github.com/docker/docker/client/container_update_test.go new file mode 100644 index 0000000000..715bb7ca23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/container" + "golang.org/x/net/context" +) + +func TestContainerUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerUpdate(context.Background(), "nothing", container.UpdateConfig{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestContainerUpdate(t *testing.T) { + expectedURL := "/containers/container_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + b, err := json.Marshal(container.ContainerUpdateOKBody{}) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + _, err := client.ContainerUpdate(context.Background(), "container_id", container.UpdateConfig{ + Resources: container.Resources{ + CPUPeriod: 1, + }, + RestartPolicy: container.RestartPolicy{ + Name: "always", + }, + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 0000000000..93212c70ee --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/container" +) + +// ContainerWait pauses execution until a container exits. +// It returns the API status code as response of its readiness. 
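+//
+// Illustrative sketch (not from upstream; the call blocks until the container
+// exits or ctx is cancelled):
+//
+//	status, err := cli.ContainerWait(ctx, "my_container")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("exit status:", status)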
+func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + return -1, err + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + return -1, err + } + + return res.StatusCode, nil +} diff --git a/vendor/github.com/docker/docker/client/container_wait_test.go b/vendor/github.com/docker/docker/client/container_wait_test.go new file mode 100644 index 0000000000..9300bc0a54 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_wait_test.go @@ -0,0 +1,70 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types/container" + + "golang.org/x/net/context" +) + +func TestContainerWaitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + code, err := client.ContainerWait(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + if code != -1 { + t.Fatalf("expected a status code equal to '-1', got %d", code) + } +} + +func TestContainerWait(t *testing.T) { + expectedURL := "/containers/container_id/wait" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + b, err := json.Marshal(container.ContainerWaitOKBody{ + StatusCode: 15, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + code, err := client.ContainerWait(context.Background(), "container_id") + if err != nil { + t.Fatal(err) + } + if code != 15 { + t.Fatalf("expected a status code equal to '15', got %d", code) + } +} + +func ExampleClient_ContainerWait_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + _, err := client.ContainerWait(ctx, "container_id") + if err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 0000000000..03c80b39af --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + if err != nil { + return du, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 0000000000..bf6923f134 --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,278 @@ +package client + +import ( + "fmt" + + 
"github.com/docker/docker/api/types/versions" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. +type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by connection failed. +func IsErrConnectionFailed(err error) bool { + _, ok := errors.Cause(err).(errConnectionFailed) + return ok +} + +// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +type notFound interface { + error + NotFound() bool // Is the error a NotFound error +} + +// IsErrNotFound returns true if the error is caused with an +// object (image, container, network, volume, …) is not found in the docker host. +func IsErrNotFound(err error) bool { + te, ok := err.(notFound) + return ok && te.NotFound() +} + +// imageNotFoundError implements an error returned when an image is not in the docker host. +type imageNotFoundError struct { + imageID string +} + +// NotFound indicates that this error type is of NotFound +func (e imageNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of an imageNotFoundError +func (e imageNotFoundError) Error() string { + return fmt.Sprintf("Error: No such image: %s", e.imageID) +} + +// IsErrImageNotFound returns true if the error is caused +// when an image is not found in the docker host. +func IsErrImageNotFound(err error) bool { + return IsErrNotFound(err) +} + +// containerNotFoundError implements an error returned when a container is not in the docker host. +type containerNotFoundError struct { + containerID string +} + +// NotFound indicates that this error type is of NotFound +func (e containerNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a containerNotFoundError +func (e containerNotFoundError) Error() string { + return fmt.Sprintf("Error: No such container: %s", e.containerID) +} + +// IsErrContainerNotFound returns true if the error is caused +// when a container is not found in the docker host. +func IsErrContainerNotFound(err error) bool { + return IsErrNotFound(err) +} + +// networkNotFoundError implements an error returned when a network is not in the docker host. +type networkNotFoundError struct { + networkID string +} + +// NotFound indicates that this error type is of NotFound +func (e networkNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a networkNotFoundError +func (e networkNotFoundError) Error() string { + return fmt.Sprintf("Error: No such network: %s", e.networkID) +} + +// IsErrNetworkNotFound returns true if the error is caused +// when a network is not found in the docker host. +func IsErrNetworkNotFound(err error) bool { + return IsErrNotFound(err) +} + +// volumeNotFoundError implements an error returned when a volume is not in the docker host. 
+type volumeNotFoundError struct { + volumeID string +} + +// NotFound indicates that this error type is of NotFound +func (e volumeNotFoundError) NotFound() bool { + return true +} + +// Error returns a string representation of a volumeNotFoundError +func (e volumeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such volume: %s", e.volumeID) +} + +// IsErrVolumeNotFound returns true if the error is caused +// when a volume is not found in the docker host. +func IsErrVolumeNotFound(err error) bool { + return IsErrNotFound(err) +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// when a remote registry authentication fails +func IsErrUnauthorized(err error) bool { + _, ok := err.(unauthorizedError) + return ok +} + +// nodeNotFoundError implements an error returned when a node is not found. +type nodeNotFoundError struct { + nodeID string +} + +// Error returns a string representation of a nodeNotFoundError +func (e nodeNotFoundError) Error() string { + return fmt.Sprintf("Error: No such node: %s", e.nodeID) +} + +// NotFound indicates that this error type is of NotFound +func (e nodeNotFoundError) NotFound() bool { + return true +} + +// IsErrNodeNotFound returns true if the error is caused +// when a node is not found. +func IsErrNodeNotFound(err error) bool { + _, ok := err.(nodeNotFoundError) + return ok +} + +// serviceNotFoundError implements an error returned when a service is not found. +type serviceNotFoundError struct { + serviceID string +} + +// Error returns a string representation of a serviceNotFoundError +func (e serviceNotFoundError) Error() string { + return fmt.Sprintf("Error: No such service: %s", e.serviceID) +} + +// NotFound indicates that this error type is of NotFound +func (e serviceNotFoundError) NotFound() bool { + return true +} + +// IsErrServiceNotFound returns true if the error is caused +// when a service is not found. +func IsErrServiceNotFound(err error) bool { + _, ok := err.(serviceNotFoundError) + return ok +} + +// taskNotFoundError implements an error returned when a task is not found. +type taskNotFoundError struct { + taskID string +} + +// Error returns a string representation of a taskNotFoundError +func (e taskNotFoundError) Error() string { + return fmt.Sprintf("Error: No such task: %s", e.taskID) +} + +// NotFound indicates that this error type is of NotFound +func (e taskNotFoundError) NotFound() bool { + return true +} + +// IsErrTaskNotFound returns true if the error is caused +// when a task is not found. 
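+//
+// Illustrative sketch of the IsErr* helpers in this file (not from upstream;
+// assumes an initialized cli and ctx):
+//
+//	if _, err := cli.ContainerInspect(ctx, "gone"); IsErrContainerNotFound(err) {
+//		fmt.Println("no such container")
+//	}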
+func IsErrTaskNotFound(err error) bool {
+	_, ok := err.(taskNotFoundError)
+	return ok
+}
+
+type pluginPermissionDenied struct {
+	name string
+}
+
+func (e pluginPermissionDenied) Error() string {
+	return "Permission denied while installing plugin " + e.name
+}
+
+// IsErrPluginPermissionDenied returns true if the error is caused
+// when a user denies a plugin's permissions
+func IsErrPluginPermissionDenied(err error) bool {
+	_, ok := err.(pluginPermissionDenied)
+	return ok
+}
+
+// NewVersionError returns an error if the APIVersion required
+// is less than the current supported version
+func (cli *Client) NewVersionError(APIrequired, feature string) error {
+	if versions.LessThan(cli.version, APIrequired) {
+		return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version)
+	}
+	return nil
+}
+
+// secretNotFoundError implements an error returned when a secret is not found.
+type secretNotFoundError struct {
+	name string
+}
+
+// Error returns a string representation of a secretNotFoundError
+func (e secretNotFoundError) Error() string {
+	return fmt.Sprintf("Error: no such secret: %s", e.name)
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e secretNotFoundError) NotFound() bool {
+	return true
+}
+
+// IsErrSecretNotFound returns true if the error is caused
+// when a secret is not found.
+func IsErrSecretNotFound(err error) bool {
+	_, ok := err.(secretNotFoundError)
+	return ok
+}
+
+// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
+type pluginNotFoundError struct {
+	name string
+}
+
+// NotFound indicates that this error type is of NotFound
+func (e pluginNotFoundError) NotFound() bool {
+	return true
+}
+
+// Error returns a string representation of a pluginNotFoundError
+func (e pluginNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such plugin: %s", e.name)
+}
+
+// IsErrPluginNotFound returns true if the error is caused
+// when a plugin is not found in the docker host.
+func IsErrPluginNotFound(err error) bool {
+	return IsErrNotFound(err)
+}
diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go
new file mode 100644
index 0000000000..af47aefa74
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/events.go
@@ -0,0 +1,102 @@
+package client
+
+import (
+	"encoding/json"
+	"net/url"
+	"time"
+
+	"golang.org/x/net/context"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/events"
+	"github.com/docker/docker/api/types/filters"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// Events returns a stream of events in the daemon. It's up to the caller to close the stream
+// by cancelling the context. Once the stream has been completely read an io.EOF error will
+// be sent over the error channel. If an error is sent all processing will be stopped. It's up
+// to the caller to reopen the stream in the event of an error by reinvoking this method.
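+//
+// Illustrative consumption sketch (not from upstream; assumes an initialized
+// cli and a cancellable ctx):
+//
+//	msgs, errs := cli.Events(ctx, types.EventsOptions{})
+//	for {
+//		select {
+//		case m := <-msgs:
+//			fmt.Println(m.Type, m.Action, m.ID)
+//		case err := <-errs:
+//			if err == io.EOF {
+//				return
+//			}
+//			log.Fatal(err)
+//		}
+//	}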
+func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/events_test.go b/vendor/github.com/docker/docker/client/events_test.go new file mode 100644 index 0000000000..ba82d2f542 --- /dev/null +++ b/vendor/github.com/docker/docker/client/events_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" +) + +func TestEventsErrorInOptions(t *testing.T) { + errorCases := []struct { + options types.EventsOptions + expectedError string + }{ + { + options: types.EventsOptions{ + Since: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + { + options: types.EventsOptions{ + Until: "2006-01-02TZ", + }, + expectedError: `parsing time "2006-01-02TZ"`, + }, + } + for _, e := range errorCases { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), e.options) + err := <-errs + if err == nil || !strings.Contains(err.Error(), e.expectedError) { + t.Fatalf("expected an error %q, got %v", e.expectedError, err) + } + } +} + +func TestEventsErrorFromServer(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, errs := client.Events(context.Background(), types.EventsOptions{}) + err := <-errs + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestEvents(t *testing.T) { + + expectedURL := "/events" + + filters := filters.NewArgs() + filters.Add("type", events.ContainerEventType) + expectedFiltersJSON := fmt.Sprintf(`{"type":{"%s":true}}`, events.ContainerEventType) + + eventsCases := []struct { + options 
types.EventsOptions + events []events.Message + expectedEvents map[string]bool + expectedQueryParams map[string]string + }{ + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{}, + expectedEvents: make(map[string]bool), + }, + { + options: types.EventsOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": expectedFiltersJSON, + }, + events: []events.Message{ + { + Type: "container", + ID: "1", + Action: "create", + }, + { + Type: "container", + ID: "2", + Action: "die", + }, + { + Type: "container", + ID: "3", + Action: "create", + }, + }, + expectedEvents: map[string]bool{ + "1": true, + "2": true, + "3": true, + }, + }, + } + + for _, eventsCase := range eventsCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + + for key, expected := range eventsCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + buffer := new(bytes.Buffer) + + for _, e := range eventsCase.events { + b, _ := json.Marshal(e) + buffer.Write(b) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(buffer), + }, nil + }), + } + + messages, errs := client.Events(context.Background(), eventsCase.options) + + loop: + for { + select { + case err := <-errs: + if err != nil && err != io.EOF { + t.Fatal(err) + } + + break loop + case e := <-messages: + _, ok := eventsCase.expectedEvents[e.ID] + if !ok { + t.Fatalf("event received not expected with action %s & id %s", e.Action, e.ID) + } + } + } + } +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 0000000000..74c53f52b3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,177 @@ +package client + +import ( + "crypto/tls" + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" + "strings" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "golang.org/x/net/context" +) + +// tlsClientCon holds tls information and a dialed connection. +type tlsClientCon struct { + *tls.Conn + rawConn net.Conn +} + +func (c *tlsClientCon) CloseWrite() error { + // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it + // on its underlying connection. + if conn, ok := c.rawConn.(types.CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// postHijacked sends a POST request and hijacks the connection. 
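Before the implementation, a sketch of how a caller typically drives the hijacked connection that postHijacked produces (illustrative, not part of the patch): resp would come from an attach-style call, the helper name pump is hypothetical, io and os are assumed imported, and the CloseWrite helper is assumed to be the one api/types defines on HijackedResponse.

    // pump copies stdin to the hijacked connection, half-closes the write
    // side (made possible over TLS by tlsClientCon.CloseWrite above),
    // then reads output until the daemon closes the connection.
    func pump(resp types.HijackedResponse) error {
        defer resp.Close()
        if _, err := io.Copy(resp.Conn, os.Stdin); err != nil {
            return err
        }
        resp.CloseWrite() // signal end of input to the server
        _, err := io.Copy(os.Stdout, resp.Reader)
        return err
    }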
+func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", "tcp") + + conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + if err != nil { + if strings.Contains(err.Error(), "connection refused") { + return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + return types.HijackedResponse{}, err + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + _, err = clientconn.Do(req) + + rwc, br := clientconn.Hijack() + + return types.HijackedResponse{Conn: rwc, Reader: br}, err +} + +func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { + return tlsDialWithDialer(new(net.Dialer), network, addr, config) +} + +// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in +// order to return our custom tlsClientCon struct which holds both the tls.Conn +// object _and_ its underlying raw connection. The rationale for this is that +// we need to be able to close the write end of the connection when attaching, +// which tls.Conn does not provide. +func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { + // We want the Timeout and Deadline values from dialer to cover the + // whole process: TCP connection and TLS handshake. This means that we + // also need to start our own timers now. + timeout := dialer.Timeout + + if !dialer.Deadline.IsZero() { + deadlineTimeout := dialer.Deadline.Sub(time.Now()) + if timeout == 0 || deadlineTimeout < timeout { + timeout = deadlineTimeout + } + } + + var errChannel chan error + + if timeout != 0 { + errChannel = make(chan error, 2) + time.AfterFunc(timeout, func() { + errChannel <- errors.New("") + }) + } + + proxyDialer, err := sockets.DialerFromEnvironment(dialer) + if err != nil { + return nil, err + } + + rawConn, err := proxyDialer.Dial(network, addr) + if err != nil { + return nil, err + } + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. 
Setting TCP KeepAlive on the socket connection will prohibit + ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := rawConn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + hostname := addr[:colonPos] + + // If no ServerName is set, infer the ServerName + // from the hostname we're connecting to. + if config.ServerName == "" { + // Make a copy to avoid polluting argument or default. + config = tlsconfig.Clone(config) + config.ServerName = hostname + } + + conn := tls.Client(rawConn, config) + + if timeout == 0 { + err = conn.Handshake() + } else { + go func() { + errChannel <- conn.Handshake() + }() + + err = <-errChannel + } + + if err != nil { + rawConn.Close() + return nil, err + } + + // This is where Docker differs from the standard crypto/tls package: we return a + // wrapper which holds both the TLS and raw connections. + return &tlsClientCon{conn, rawConn}, nil +} + +func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + // Note that this isn't the standard library's tls.Dial function + return tlsDial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 0000000000..6fde75dcfd --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,123 @@ +package client + +import ( + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an io.ReadCloser and it's up to the caller to +// close it.
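A usage sketch for the function that follows (illustrative, not part of the vendored file): buildCtx is assumed to be a tar archive of the build context, the tag is a made-up example, and io and os are assumed imported.

    // build runs a build and streams the JSON progress messages to stdout.
    func build(ctx context.Context, cli *Client, buildCtx io.Reader) error {
        resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
            Tags:       []string{"example/app:dev"}, // illustrative tag
            Dockerfile: "Dockerfile",
        })
        if err != nil {
            return err
        }
        defer resp.Body.Close() // closing Body is the caller's job, per the comment above
        _, err = io.Copy(os.Stdout, resp.Body)
        return err
    }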
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/image_build_test.go b/vendor/github.com/docker/docker/client/image_build_test.go new file mode 100644 index 0000000000..b9d04f817a --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build_test.go @@ -0,0 +1,233 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/go-units" +) + +func TestImageBuildError(t *testing.T) { + client := &Client{ + client: 
newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageBuild(context.Background(), nil, types.ImageBuildOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageBuild(t *testing.T) { + v1 := "value1" + v2 := "value2" + emptyRegistryConfig := "bnVsbA==" + buildCases := []struct { + buildOptions types.ImageBuildOptions + expectedQueryParams map[string]string + expectedTags []string + expectedRegistryConfig string + }{ + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: true, + NoCache: true, + Remove: true, + ForceRemove: true, + PullParent: true, + }, + expectedQueryParams: map[string]string{ + "q": "1", + "nocache": "1", + "rm": "1", + "forcerm": "1", + "pull": "1", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + SuppressOutput: false, + NoCache: false, + Remove: false, + ForceRemove: false, + PullParent: false, + }, + expectedQueryParams: map[string]string{ + "q": "", + "nocache": "", + "rm": "0", + "forcerm": "", + "pull": "", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + RemoteContext: "remoteContext", + Isolation: container.Isolation("isolation"), + CPUSetCPUs: "2", + CPUSetMems: "12", + CPUShares: 20, + CPUQuota: 10, + CPUPeriod: 30, + Memory: 256, + MemorySwap: 512, + ShmSize: 10, + CgroupParent: "cgroup_parent", + Dockerfile: "Dockerfile", + }, + expectedQueryParams: map[string]string{ + "remote": "remoteContext", + "isolation": "isolation", + "cpusetcpus": "2", + "cpusetmems": "12", + "cpushares": "20", + "cpuquota": "10", + "cpuperiod": "30", + "memory": "256", + "memswap": "512", + "shmsize": "10", + "cgroupparent": "cgroup_parent", + "dockerfile": "Dockerfile", + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + BuildArgs: map[string]*string{ + "ARG1": &v1, + "ARG2": &v2, + "ARG3": nil, + }, + }, + expectedQueryParams: map[string]string{ + "buildargs": `{"ARG1":"value1","ARG2":"value2","ARG3":null}`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + Ulimits: []*units.Ulimit{ + { + Name: "nproc", + Hard: 65557, + Soft: 65557, + }, + { + Name: "nofile", + Hard: 20000, + Soft: 40000, + }, + }, + }, + expectedQueryParams: map[string]string{ + "ulimits": `[{"Name":"nproc","Hard":65557,"Soft":65557},{"Name":"nofile","Hard":20000,"Soft":40000}]`, + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: emptyRegistryConfig, + }, + { + buildOptions: types.ImageBuildOptions{ + AuthConfigs: map[string]types.AuthConfig{ + "https://index.docker.io/v1/": { + Auth: "dG90bwo=", + }, + }, + }, + expectedQueryParams: map[string]string{ + "rm": "0", + }, + expectedTags: []string{}, + expectedRegistryConfig: "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289In19", + }, + } + for _, buildCase := range buildCases { + expectedURL := "/build" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check request headers + registryConfig := r.Header.Get("X-Registry-Config") + if 
registryConfig != buildCase.expectedRegistryConfig { + return nil, fmt.Errorf("X-Registry-Config header not properly set in the request. Expected '%s', got %s", buildCase.expectedRegistryConfig, registryConfig) + } + contentType := r.Header.Get("Content-Type") + if contentType != "application/tar" { + return nil, fmt.Errorf("Content-type header not properly set in the request. Expected 'application/tar', got %s", contentType) + } + + // Check query parameters + query := r.URL.Query() + for key, expected := range buildCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + + // Check tags + if len(buildCase.expectedTags) > 0 { + tags := query["t"] + if !reflect.DeepEqual(tags, buildCase.expectedTags) { + return nil, fmt.Errorf("t (tags) not set in URL query properly. Expected '%s', got %s", buildCase.expectedTags, tags) + } + } + + headers := http.Header{} + headers.Add("Server", "Docker/v1.23 (MyOS)") + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + Header: headers, + }, nil + }), + } + buildResponse, err := client.ImageBuild(context.Background(), nil, buildCase.buildOptions) + if err != nil { + t.Fatal(err) + } + if buildResponse.OSType != "MyOS" { + t.Fatalf("expected OSType to be 'MyOS', got %s", buildResponse.OSType) + } + response, err := ioutil.ReadAll(buildResponse.Body) + if err != nil { + t.Fatal(err) + } + buildResponse.Body.Close() + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } + } +} + +func TestGetDockerOS(t *testing.T) { + cases := map[string]string{ + "Docker/v1.22 (linux)": "linux", + "Docker/v1.22 (windows)": "windows", + "Foo/v1.22 (bar)": "", + } + for header, os := range cases { + g := getDockerOS(header) + if g != os { + t.Fatalf("Expected %s, got %s", os, g) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 0000000000..cf023a7186 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,34 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImageCreate creates a new image based on the parent options. +// It returns the JSON content in the response body.
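A short sketch of calling the function that follows (illustrative, not part of the vendored file): the reference string and helper name are made up, and io and io/ioutil are assumed imported.

    // pullParent creates an image from a parent reference and drains the
    // JSON progress stream that the daemon returns.
    func pullParent(ctx context.Context, cli *Client) error {
        rc, err := cli.ImageCreate(ctx, "alpine:3.6", types.ImageCreateOptions{}) // illustrative reference
        if err != nil {
            return err
        }
        defer rc.Close() // the caller owns the ReadCloser
        _, err = io.Copy(ioutil.Discard, rc)
        return err
    }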
+func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + query.Set("tag", tag) + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_create_test.go b/vendor/github.com/docker/docker/client/image_create_test.go new file mode 100644 index 0000000000..5c2edd2ad5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImageCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageCreate(context.Background(), "reference", types.ImageCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageCreate(t *testing.T) { + expectedURL := "/images/create" + expectedImage := "test:5000/my_image" + expectedTag := "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff" + expectedReference := fmt.Sprintf("%s@%s", expectedImage, expectedTag) + expectedRegistryAuth := "eyJodHRwczovL2luZGV4LmRvY2tlci5pby92MS8iOnsiYXV0aCI6ImRHOTBid289IiwiZW1haWwiOiJqb2huQGRvZS5jb20ifX0=" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + registryAuth := r.Header.Get("X-Registry-Auth") + if registryAuth != expectedRegistryAuth { + return nil, fmt.Errorf("X-Registry-Auth header not properly set in the request. Expected '%s', got %s", expectedRegistryAuth, registryAuth) + } + + query := r.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != expectedImage { + return nil, fmt.Errorf("fromImage not set in URL query properly. Expected '%s', got %s", expectedImage, fromImage) + } + + tag := query.Get("tag") + if tag != expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", expectedTag, tag) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + createResponse, err := client.ImageCreate(context.Background(), expectedReference, types.ImageCreateOptions{ + RegistryAuth: expectedRegistryAuth, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(createResponse) + if err != nil { + t.Fatal(err) + } + if err = createResponse.Close(); err != nil { + t.Fatal(err) + } + if string(response) != "body" { + t.Fatalf("expected Body to contain 'body' string, got %s", response) + } +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 0000000000..acb1ee9278 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { + var history []types.ImageHistory + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + ensureReaderClosed(serverResp) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_history_test.go b/vendor/github.com/docker/docker/client/image_history_test.go new file mode 100644 index 0000000000..729edb1ad5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history_test.go @@ -0,0 +1,60 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageHistoryError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageHistory(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageHistory(t *testing.T) { + expectedURL := "/images/image_id/history" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + b, err := json.Marshal([]types.ImageHistory{ + { + ID: "image_id1", + Tags: []string{"tag1", "tag2"}, + }, + { + ID: "image_id2", + Tags: []string{"tag1", "tag2"}, + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageHistories, err := client.ImageHistory(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if len(imageHistories) != 2 { + t.Fatalf("expected 2 containers, got %v", imageHistories) + } +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 0000000000..c6f154b249 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,37 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + 
"github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based in the source options. +// It returns the JSON content in the response body. +func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + //Check if the given image name can be resolved + if _, err := reference.ParseNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_import_test.go b/vendor/github.com/docker/docker/client/image_import_test.go new file mode 100644 index 0000000000..e309be74e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import_test.go @@ -0,0 +1,81 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageImportError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageImport(context.Background(), types.ImageImportSource{}, "image:tag", types.ImageImportOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageImport(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + fromSrc := query.Get("fromSrc") + if fromSrc != "image_source" { + return nil, fmt.Errorf("fromSrc not set in URL query properly. Expected 'image_source', got %s", fromSrc) + } + repo := query.Get("repo") + if repo != "repository_name:imported" { + return nil, fmt.Errorf("repo not set in URL query properly. Expected 'repository_name', got %s", repo) + } + tag := query.Get("tag") + if tag != "imported" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected 'imported', got %s", tag) + } + message := query.Get("message") + if message != "A message" { + return nil, fmt.Errorf("message not set in URL query properly. Expected 'A message', got %s", message) + } + changes := query["changes"] + expectedChanges := []string{"change1", "change2"} + if !reflect.DeepEqual(expectedChanges, changes) { + return nil, fmt.Errorf("changes not set in URL query properly. 
Expected %v, got %v", expectedChanges, changes) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + importResponse, err := client.ImageImport(context.Background(), types.ImageImportSource{ + Source: strings.NewReader("source"), + SourceName: "image_source", + }, "repository_name:imported", types.ImageImportOptions{ + Tag: "imported", + Message: "A message", + Changes: []string{"change1", "change2"}, + }) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(importResponse) + if err != nil { + t.Fatal(err) + } + importResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 0000000000..b3a64ce2f8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return types.ImageInspect{}, nil, imageNotFoundError{imageID} + } + return types.ImageInspect{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_inspect_test.go b/vendor/github.com/docker/docker/client/image_inspect_test.go new file mode 100644 index 0000000000..74a4e49805 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect_test.go @@ -0,0 +1,71 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageInspectImageNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ImageInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrImageNotFound(err) { + t.Fatalf("expected an imageNotFound error, got %v", err) + } +} + +func TestImageInspect(t *testing.T) { + expectedURL := "/images/image_id/json" + expectedTags := []string{"tag1", "tag2"} + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.ImageInspect{ + 
ID: "image_id", + RepoTags: expectedTags, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + imageInspect, _, err := client.ImageInspectWithRaw(context.Background(), "image_id") + if err != nil { + t.Fatal(err) + } + if imageInspect.ID != "image_id" { + t.Fatalf("expected `image_id`, got %s", imageInspect.ID) + } + if !reflect.DeepEqual(imageInspect.RepoTags, expectedTags) { + t.Fatalf("expected `%v`, got %v", expectedTags, imageInspect.RepoTags) + } +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 0000000000..f26464f67c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,45 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// ImageList returns a list of images in the docker host. +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + ensureReaderClosed(serverResp) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_list_test.go b/vendor/github.com/docker/docker/client/image_list_test.go new file mode 100644 index 0000000000..7c4a46414d --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list_test.go @@ -0,0 +1,159 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestImageListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageList(context.Background(), types.ImageListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageList(t *testing.T) { + expectedURL := "/images/json" + + noDanglingfilters := filters.NewArgs() + noDanglingfilters.Add("dangling", "false") + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + filters.Add("dangling", "true") + + listCases := []struct { + options types.ImageListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ImageListOptions{}, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": "", + }, + }, + { + options: types.ImageListOptions{ + Filters: filters, + }, + 
expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"true":true},"label":{"label1":true,"label2":true}}`, + }, + }, + { + options: types.ImageListOptions{ + Filters: noDanglingfilters, + }, + expectedQueryParams: map[string]string{ + "all": "", + "filter": "", + "filters": `{"dangling":{"false":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + images, err := client.ImageList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } + } +} + +func TestImageListApiBefore125(t *testing.T) { + expectedFilter := "image:tag" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + query := req.URL.Query() + actualFilter := query.Get("filter") + if actualFilter != expectedFilter { + return nil, fmt.Errorf("filter not set in URL query properly. Expected '%s', got %s", expectedFilter, actualFilter) + } + actualFilters := query.Get("filters") + if actualFilters != "" { + return nil, fmt.Errorf("filters should have not been present, were with value: %s", actualFilters) + } + content, err := json.Marshal([]types.ImageSummary{ + { + ID: "image_id2", + }, + { + ID: "image_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + version: "1.24", + } + + filters := filters.NewArgs() + filters.Add("reference", "image:tag") + + options := types.ImageListOptions{ + Filters: filters, + } + + images, err := client.ImageList(context.Background(), options) + if err != nil { + t.Fatal(err) + } + if len(images) != 2 { + t.Fatalf("expected 2 images, got %v", images) + } +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 0000000000..77aaf1af36 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,30 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. 
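A usage sketch for the function that follows (illustrative, not part of the vendored file): the path and helper name are hypothetical, and io and os are assumed imported.

    // loadTar feeds a tar archive (e.g. one produced by ImageSave) to the
    // daemon and prints the response, closing the body as required above.
    func loadTar(ctx context.Context, cli *Client, path string) error {
        f, err := os.Open(path)
        if err != nil {
            return err
        }
        defer f.Close()
        resp, err := cli.ImageLoad(ctx, f, true) // quiet: JSON progress only
        if err != nil {
            return err
        }
        defer resp.Body.Close() // the caller owns the response body
        _, err = io.Copy(os.Stdout, resp.Body)
        return err
    }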
+func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_load_test.go b/vendor/github.com/docker/docker/client/image_load_test.go new file mode 100644 index 0000000000..68dc14ff22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageLoadError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageLoad(context.Background(), nil, true) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageLoad(t *testing.T) { + expectedURL := "/images/load" + expectedInput := "inputBody" + expectedOutput := "outputBody" + loadCases := []struct { + quiet bool + responseContentType string + expectedResponseJSON bool + expectedQueryParams map[string]string + }{ + { + quiet: false, + responseContentType: "text/plain", + expectedResponseJSON: false, + expectedQueryParams: map[string]string{ + "quiet": "0", + }, + }, + { + quiet: true, + responseContentType: "application/json", + expectedResponseJSON: true, + expectedQueryParams: map[string]string{ + "quiet": "1", + }, + }, + } + for _, loadCase := range loadCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + contentType := req.Header.Get("Content-Type") + if contentType != "application/x-tar" { + return nil, fmt.Errorf("content-type not set in URL headers properly. Expected 'application/x-tar', got %s", contentType) + } + query := req.URL.Query() + for key, expected := range loadCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + headers := http.Header{} + headers.Add("Content-Type", loadCase.responseContentType) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + Header: headers, + }, nil + }), + } + + input := bytes.NewReader([]byte(expectedInput)) + imageLoadResponse, err := client.ImageLoad(context.Background(), input, loadCase.quiet) + if err != nil { + t.Fatal(err) + } + if imageLoadResponse.JSON != loadCase.expectedResponseJSON { + t.Fatalf("expected a JSON response, was not.") + } + body, err := ioutil.ReadAll(imageLoadResponse.Body) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected %s, got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 0000000000..5ef98b7f02 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 0000000000..3bffdb70e8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ -0,0 +1,46 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/reference" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. 
+// +// FIXME(vdemeester): there is currently used in a few way in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { + repository, tag, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", repository) + if tag != "" && !options.All { + query.Set("tag", tag) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull_test.go b/vendor/github.com/docker/docker/client/image_pull_test.go new file mode 100644 index 0000000000..fe6bafed97 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull_test.go @@ -0,0 +1,199 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePullReferenceParseError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePull(context.Background(), "", types.ImagePullOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePullAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePullStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePullWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePull(context.Background(), 
"myimage", types.ImagePullOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePullWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != "myimage" { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", "myimage", fromImage) + } + tag := query.Get("tag") + if tag != "latest" { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", "latest", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePull(context.Background(), "myimage", types.ImagePullOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePullWithoutErrors(t *testing.T) { + expectedURL := "/images/create" + expectedOutput := "hello world" + pullCases := []struct { + all bool + reference string + expectedImage string + expectedTag string + }{ + { + all: false, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "latest", + }, + { + all: false, + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + { + all: true, + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + all: true, + reference: "myimage:anything", + expectedImage: "myimage", + expectedTag: "", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + fromImage := query.Get("fromImage") + if fromImage != pullCase.expectedImage { + return nil, fmt.Errorf("fromimage not set in URL query properly. Expected '%s', got %s", pullCase.expectedImage, fromImage) + } + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePull(context.Background(), pullCase.reference, types.ImagePullOptions{ + All: pullCase.all, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 0000000000..8e73d28f56 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client + +import ( + "errors" + "io" + "net/http" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return nil, err + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + var tag = "" + if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { + tag = nameTaggedRef.Tag() + } + + query := url.Values{} + query.Set("tag", tag) + + resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_push_test.go b/vendor/github.com/docker/docker/client/image_push_test.go new file mode 100644 index 0000000000..b52da8b8dc --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push_test.go @@ -0,0 +1,180 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" +) + +func TestImagePushReferenceError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return nil, nil + }), + } + // An empty reference is an invalid reference + _, err := client.ImagePush(context.Background(), "", types.ImagePushOptions{}) + if err == nil || err.Error() != "repository name must have at least one component" { + t.Fatalf("expected an error, got %v", err) + } + // An canonical reference cannot be pushed + _, err = client.ImagePush(context.Background(), 
"repo@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", types.ImagePushOptions{}) + if err == nil || err.Error() != "cannot push a digest reference" { + t.Fatalf("expected an error, got %v", err) + } +} + +func TestImagePushAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImagePushStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImagePushWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImagePush(context.Background(), "myimage", types.ImagePushOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImagePushWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/myimage/push" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected %s, got %s", "IAmValid", auth) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != "tag" { + return nil, fmt.Errorf("tag not set in URL query properly. 
Expected '%s', got %s", "tag", tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + resp, err := client.ImagePush(context.Background(), "myimage:tag", types.ImagePushOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != "hello world" { + t.Fatalf("expected 'hello world', got %s", string(body)) + } +} + +func TestImagePushWithoutErrors(t *testing.T) { + expectedOutput := "hello world" + expectedURLFormat := "/images/%s/push" + pullCases := []struct { + reference string + expectedImage string + expectedTag string + }{ + { + reference: "myimage", + expectedImage: "myimage", + expectedTag: "", + }, + { + reference: "myimage:tag", + expectedImage: "myimage", + expectedTag: "tag", + }, + } + for _, pullCase := range pullCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + expectedURL := fmt.Sprintf(expectedURLFormat, pullCase.expectedImage) + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + tag := query.Get("tag") + if tag != pullCase.expectedTag { + return nil, fmt.Errorf("tag not set in URL query properly. Expected '%s', got %s", pullCase.expectedTag, tag) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(expectedOutput))), + }, nil + }), + } + resp, err := client.ImagePush(context.Background(), pullCase.reference, types.ImagePushOptions{}) + if err != nil { + t.Fatal(err) + } + body, err := ioutil.ReadAll(resp) + if err != nil { + t.Fatal(err) + } + if string(body) != expectedOutput { + t.Fatalf("expected '%s', got %s", expectedOutput, string(body)) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 0000000000..839e5311c4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ImageRemove removes an image from the docker host. 
+func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + if err != nil { + return nil, err + } + + var dels []types.ImageDelete + err = json.NewDecoder(resp.body).Decode(&dels) + ensureReaderClosed(resp) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_remove_test.go b/vendor/github.com/docker/docker/client/image_remove_test.go new file mode 100644 index 0000000000..7b004f70e6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove_test.go @@ -0,0 +1,95 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestImageRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageRemove(t *testing.T) { + expectedURL := "/images/image_id" + removeCases := []struct { + force bool + pruneChildren bool + expectedQueryParams map[string]string + }{ + { + force: false, + pruneChildren: false, + expectedQueryParams: map[string]string{ + "force": "", + "noprune": "1", + }, + }, { + force: true, + pruneChildren: true, + expectedQueryParams: map[string]string{ + "force": "1", + "noprune": "", + }, + }, + } + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range removeCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + b, err := json.Marshal([]types.ImageDelete{ + { + Untagged: "image_id1", + }, + { + Deleted: "image_id", + }, + }) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + imageDeletes, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{ + Force: removeCase.force, + PruneChildren: removeCase.pruneChildren, + }) + if err != nil { + t.Fatal(err) + } + if len(imageDeletes) != 2 { + t.Fatalf("expected 2 deleted images, got %v", imageDeletes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 0000000000..ecac880a32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,22 @@ +package client + +import ( + "io" + "net/url" + + "golang.org/x/net/context" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
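A hedged sketch of driving the ImageSave method defined just below, streaming the image tar to disk; the helper name, image reference, and output path are made up for the example:

package sketch

import (
    "context"
    "io"
    "os"

    "github.com/docker/docker/client"
)

// saveImageTar writes `docker save`-style tar output to path.
// The returned stream must be drained and closed by the caller.
func saveImageTar(ctx context.Context, cli *client.Client, ref, path string) error {
    rc, err := cli.ImageSave(ctx, []string{ref})
    if err != nil {
        return err
    }
    defer rc.Close()
    out, err := os.Create(path)
    if err != nil {
        return err
    }
    defer out.Close()
    _, err = io.Copy(out, rc)
    return err
}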
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_save_test.go b/vendor/github.com/docker/docker/client/image_save_test.go new file mode 100644 index 0000000000..8f0cf88640 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save_test.go @@ -0,0 +1,58 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "strings" +) + +func TestImageSaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSave(context.Background(), []string{"nothing"}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server error, got %v", err) + } +} + +func TestImageSave(t *testing.T) { + expectedURL := "/images/get" + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + query := r.URL.Query() + names := query["names"] + expectedNames := []string{"image_id1", "image_id2"} + if !reflect.DeepEqual(names, expectedNames) { + return nil, fmt.Errorf("names not set in URL query properly. Expected %v, got %v", expectedNames, names) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + saveResponse, err := client.ImageSave(context.Background(), []string{"image_id1", "image_id2"}) + if err != nil { + t.Fatal(err) + } + response, err := ioutil.ReadAll(saveResponse) + if err != nil { + t.Fatal(err) + } + saveResponse.Close() + if string(response) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(response)) + } +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 0000000000..b0fcd5c23d --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,51 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion.
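Likewise, a small sketch of calling the ImageSearch method defined just below, using the same filter shape the tests in this file encode as JSON; the search term, star threshold, and limit are arbitrary assumptions:

package sketch

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

// searchStarred lists registry hits for term with at least three stars.
func searchStarred(ctx context.Context, cli *client.Client, term string) error {
    args := filters.NewArgs()
    args.Add("stars", "3")
    results, err := cli.ImageSearch(ctx, term, types.ImageSearchOptions{
        Filters: args,
        Limit:   10,
    })
    if err != nil {
        return err
    }
    for _, r := range results {
        fmt.Println(r.Name, r.StarCount)
    }
    return nil
}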
+func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + ensureReaderClosed(resp) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_search_test.go b/vendor/github.com/docker/docker/client/image_search_test.go new file mode 100644 index 0000000000..b17bbd8343 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search_test.go @@ -0,0 +1,165 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "encoding/json" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" +) + +func TestImageSearchAnyError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestImageSearchStatusUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{}) + if err == nil || err.Error() != "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndPrivilegeFuncError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "", fmt.Errorf("Error requesting privilege") + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() != "Error requesting privilege" { + t.Fatalf("expected an error requesting privilege, got %v", err) + } +} + +func TestImageSearchWithUnauthorizedErrorAndAnotherUnauthorizedError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusUnauthorized, "Unauthorized error")), + } + privilegeFunc := func() (string, error) { + return "a-auth-header", nil + } + _, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + PrivilegeFunc: privilegeFunc, + }) + if err == nil || err.Error() 
!= "Error response from daemon: Unauthorized error" { + t.Fatalf("expected an Unauthorized Error, got %v", err) + } +} + +func TestImageSearchWithPrivilegedFuncNoError(t *testing.T) { + expectedURL := "/images/search" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + auth := req.Header.Get("X-Registry-Auth") + if auth == "NotValid" { + return &http.Response{ + StatusCode: http.StatusUnauthorized, + Body: ioutil.NopCloser(bytes.NewReader([]byte("Invalid credentials"))), + }, nil + } + if auth != "IAmValid" { + return nil, fmt.Errorf("Invalid auth header : expected 'IAmValid', got %s", auth) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + privilegeFunc := func() (string, error) { + return "IAmValid", nil + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + RegistryAuth: "NotValid", + PrivilegeFunc: privilegeFunc, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected 1 result, got %v", results) + } +} + +func TestImageSearchWithoutErrors(t *testing.T) { + expectedURL := "/images/search" + filterArgs := filters.NewArgs() + filterArgs.Add("is-automated", "true") + filterArgs.Add("stars", "3") + + expectedFilters := `{"is-automated":{"true":true},"stars":{"3":true}}` + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + term := query.Get("term") + if term != "some-image" { + return nil, fmt.Errorf("term not set in URL query properly. Expected 'some-image', got %s", term) + } + filters := query.Get("filters") + if filters != expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", expectedFilters, filters) + } + content, err := json.Marshal([]registry.SearchResult{ + { + Name: "anything", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + results, err := client.ImageSearch(context.Background(), "some-image", types.ImageSearchOptions{ + Filters: filterArgs, + }) + if err != nil { + t.Fatal(err) + } + if len(results) != 1 { + t.Fatalf("expected a result, got %v", results) + } +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 0000000000..bdbf94add2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,34 @@ +package client + +import ( + "errors" + "fmt" + "net/url" + + "golang.org/x/net/context" + + distreference "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types/reference" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { + distributionRef, err := distreference.ParseNamed(ref) + if err != nil { + return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) + } + + if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + tag := reference.GetTagFromNamedRef(distributionRef) + + query := url.Values{} + query.Set("repo", distributionRef.Name()) + query.Set("tag", tag) + + resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/image_tag_test.go b/vendor/github.com/docker/docker/client/image_tag_test.go new file mode 100644 index 0000000000..7925db9f1b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag_test.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestImageTagError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "repo:tag") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +// Note: this is not testing all the InvalidReference as it's the reponsability +// of distribution/reference package. 
+func TestImageTagInvalidReference(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ImageTag(context.Background(), "image_id", "aa/asdf$$^/aa") + if err == nil || err.Error() != `Error parsing reference: "aa/asdf$$^/aa" is not a valid repository/tag` { + t.Fatalf("expected ErrReferenceInvalidFormat, got %v", err) + } +} + +func TestImageTag(t *testing.T) { + expectedURL := "/images/image_id/tag" + tagCases := []struct { + reference string + expectedQueryParams map[string]string + }{ + { + reference: "repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "repository", + "tag": "tag1", + }, + }, { + reference: "another_repository:latest", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "another_repository", + expectedQueryParams: map[string]string{ + "repo": "another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "latest", + }, + }, { + reference: "test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository:tag1", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "tag1", + }, + }, { + reference: "test:5000/test/another_repository", + expectedQueryParams: map[string]string{ + "repo": "test:5000/test/another_repository", + "tag": "latest", + }, + }, + } + for _, tagCase := range tagCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + query := req.URL.Query() + for key, expected := range tagCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + err := client.ImageTag(context.Background(), "image_id", tagCase.reference) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 0000000000..ac07961224 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Info returns information about the docker server. 
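A minimal sketch of the Info call defined just below; the helper is hypothetical, and the fields printed are the same ones the accompanying test asserts on:

package sketch

import (
    "context"
    "fmt"

    "github.com/docker/docker/client"
)

// printDaemonSummary shows a couple of fields from the daemon's /info reply.
func printDaemonSummary(ctx context.Context, cli *client.Client) error {
    info, err := cli.Info(ctx)
    if err != nil {
        return err
    }
    fmt.Printf("daemon %s is running %d containers\n", info.ID, info.Containers)
    return nil
}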
+func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + if err != nil { + return info, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/info_test.go b/vendor/github.com/docker/docker/client/info_test.go new file mode 100644 index 0000000000..79f23c8af2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestInfoServerError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.Info(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestInfoInvalidResponseJSONError(t *testing.T) { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("invalid json"))), + }, nil + }), + } + _, err := client.Info(context.Background()) + if err == nil || !strings.Contains(err.Error(), "invalid character") { + t.Fatalf("expected a 'invalid character' error, got %v", err) + } +} + +func TestInfo(t *testing.T) { + expectedURL := "/info" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + info := &types.Info{ + ID: "daemonID", + Containers: 3, + } + b, err := json.Marshal(info) + if err != nil { + return nil, err + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + info, err := client.Info(context.Background()) + if err != nil { + t.Fatal(err) + } + + if info.ID != "daemonID" { + t.Fatalf("expected daemonID, got %s", info.ID) + } + + if info.Containers != 3 { + t.Fatalf("expected 3 containers, got %d", info.Containers) + } +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 0000000000..05978039b7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,171 @@ +package client + +import ( + "io" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
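One practical consequence of the interfaces declared below is dependency narrowing: a consumer that only needs image calls can accept ImageAPIClient instead of the concrete *client.Client, which keeps fakes small in tests. A compilable sketch under that assumption (the helper name is invented):

package sketch

import (
    "context"
    "io"
    "io/ioutil"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
)

// pullQuietly depends only on the image slice of the API, so a test can
// pass a stub that implements client.ImageAPIClient.
func pullQuietly(ctx context.Context, c client.ImageAPIClient, ref string) error {
    rc, err := c.ImagePull(ctx, ref, types.ImagePullOptions{})
    if err != nil {
        return err
    }
    defer rc.Close()
    _, err = io.Copy(ioutil.Discard, rc) // drain the JSON progress stream
    return err
}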
+type CommonAPIClient interface { + ContainerAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + ServerVersion(ctx context.Context) (types.Version, error) + UpdateClientVersion(v string) +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string) (int64, error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) 
(types.ContainersPruneReport, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error + NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, 
registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go 
b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 0000000000..51da98ecdd --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 0000000000..cc90a3cbb9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. +var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 0000000000..600dc7196f --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,29 @@ +package client + +import ( + "encoding/json" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" + "golang.org/x/net/context" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns an unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + + if resp.statusCode == http.StatusUnauthorized { + return registry.AuthenticateOKBody{}, unauthorizedError{err} + } + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 0000000000..c022c17b5b --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,18 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" + "golang.org/x/net/context" +) + +// NetworkConnect connects a container to an existent network in the docker host.
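A hedged sketch of the NetworkConnect method defined just below, attaching a container with a static IPv4 address; the helper name and all identifiers passed in are invented for illustration:

package sketch

import (
    "context"

    "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/client"
)

// attachWithStaticIP joins containerID to networkID at a fixed address.
func attachWithStaticIP(ctx context.Context, cli *client.Client, networkID, containerID, ip string) error {
    settings := &network.EndpointSettings{
        IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: ip},
    }
    return cli.NetworkConnect(ctx, networkID, containerID, settings)
}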
+func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_connect_test.go b/vendor/github.com/docker/docker/client/network_connect_test.go new file mode 100644 index 0000000000..d472f4520c --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect_test.go @@ -0,0 +1,107 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +func TestNetworkConnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkConnectEmptyNilEndpointSettings(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig != nil { + return nil, fmt.Errorf("expected connect.EndpointConfig to be nil, got %v", connect.EndpointConfig) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", nil) + if err != nil { + t.Fatal(err) + } +} + +func TestNetworkConnect(t *testing.T) { + expectedURL := "/networks/network_id/connect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var connect types.NetworkConnect + if err := json.NewDecoder(req.Body).Decode(&connect); err != nil { + return nil, err + } + + if connect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", connect.Container) + } + + if connect.EndpointConfig.NetworkID != "NetworkID" { + return nil, fmt.Errorf("expected 'NetworkID', got %s", connect.EndpointConfig.NetworkID) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkConnect(context.Background(), "network_id", "container_id", &network.EndpointSettings{ + NetworkID: "NetworkID", + }) + if err != nil { + t.Fatal(err) + } +} diff --git 
a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 0000000000..4067a541ff --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + if err != nil { + return response, err + } + + json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_create_test.go b/vendor/github.com/docker/docker/client/network_create_test.go new file mode 100644 index 0000000000..0e2457f89c --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create_test.go @@ -0,0 +1,72 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkCreate(t *testing.T) { + expectedURL := "/networks/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkCreateResponse{ + ID: "network_id", + Warning: "warning", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResponse, err := client.NetworkCreate(context.Background(), "mynetwork", types.NetworkCreate{ + CheckDuplicate: true, + Driver: "mydriver", + EnableIPv6: true, + Internal: true, + Options: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if networkResponse.ID != "network_id" { + t.Fatalf("expected networkResponse.ID to be 'network_id', got %s", networkResponse.ID) + } + if networkResponse.Warning != "warning" { + t.Fatalf("expected networkResponse.Warning to be 'warning', got %s", networkResponse.Warning) + } +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 0000000000..24b58e3c12 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,14 @@ +package client + +import ( + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkDisconnect disconnects a container from an existent network in the 
docker host. +func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect_test.go b/vendor/github.com/docker/docker/client/network_disconnect_test.go new file mode 100644 index 0000000000..b54a2b1ccf --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect_test.go @@ -0,0 +1,64 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkDisconnectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkDisconnect(t *testing.T) { + expectedURL := "/networks/network_id/disconnect" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + var disconnect types.NetworkDisconnect + if err := json.NewDecoder(req.Body).Decode(&disconnect); err != nil { + return nil, err + } + + if disconnect.Container != "container_id" { + return nil, fmt.Errorf("expected 'container_id', got %s", disconnect.Container) + } + + if !disconnect.Force { + return nil, fmt.Errorf("expected Force to be true, got %v", disconnect.Force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.NetworkDisconnect(context.Background(), "network_id", "container_id", true) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 0000000000..5ad4ea5bf3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
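A small sketch tying together the NetworkInspect method above and the not-found helper this package exposes; the network name and helper are assumptions:

package sketch

import (
    "context"
    "fmt"

    "github.com/docker/docker/client"
)

// describeNetwork prints a one-line summary, distinguishing a missing
// network from other failures via IsErrNetworkNotFound.
func describeNetwork(ctx context.Context, cli *client.Client, id string) error {
    nw, err := cli.NetworkInspect(ctx, id)
    if client.IsErrNetworkNotFound(err) {
        return fmt.Errorf("no such network: %s", id)
    }
    if err != nil {
        return err
    }
    fmt.Printf("%s: driver=%s containers=%d\n", nw.Name, nw.Driver, len(nw.Containers))
    return nil
}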
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { + var networkResource types.NetworkResource + resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return networkResource, nil, networkNotFoundError{networkID} + } + return networkResource, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect_test.go b/vendor/github.com/docker/docker/client/network_inspect_test.go new file mode 100644 index 0000000000..1f926d66ba --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestNetworkInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkInspectNotFoundError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.NetworkInspect(context.Background(), "unknown") + if err == nil || !IsErrNetworkNotFound(err) { + t.Fatalf("expected a networkNotFoundError error, got %v", err) + } +} + +func TestNetworkInspect(t *testing.T) { + expectedURL := "/networks/network_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + + content, err := json.Marshal(types.NetworkResource{ + Name: "mynetwork", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + r, err := client.NetworkInspect(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } + if r.Name != "mynetwork" { + t.Fatalf("expected `mynetwork`, got %s", r.Name) + } +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 0000000000..e566a93e23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,31 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworkList returns the list of networks configured in the docker host.
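And a sketch of the NetworkList method defined just below with a driver filter, mirroring the JSON-encoded filters the tests check; the driver value is illustrative:

package sketch

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/filters"
    "github.com/docker/docker/client"
)

// listBridgeNetworks prints every network using the bridge driver.
func listBridgeNetworks(ctx context.Context, cli *client.Client) error {
    args := filters.NewArgs()
    args.Add("driver", "bridge")
    networks, err := cli.NetworkList(ctx, types.NetworkListOptions{Filters: args})
    if err != nil {
        return err
    }
    for _, nw := range networks {
        fmt.Println(nw.ID, nw.Name)
    }
    return nil
}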
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + ensureReaderClosed(resp) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_list_test.go b/vendor/github.com/docker/docker/client/network_list_test.go new file mode 100644 index 0000000000..4d443496ac --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list_test.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +func TestNetworkListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NetworkList(context.Background(), types.NetworkListOptions{ + Filters: filters.NewArgs(), + }) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkList(t *testing.T) { + expectedURL := "/networks" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + options types.NetworkListOptions + expectedFilters string + }{ + { + options: types.NetworkListOptions{ + Filters: filters.NewArgs(), + }, + expectedFilters: "", + }, { + options: types.NetworkListOptions{ + Filters: noDanglingFilters, + }, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: danglingFilters, + }, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + options: types.NetworkListOptions{ + Filters: labelFilters, + }, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal([]types.NetworkResource{ + { + Name: "network", + Driver: "bridge", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + networkResources, err := client.NetworkList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(networkResources) != 1 { + t.Fatalf("expected 1 network resource, got %v", networkResources) + } + } +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 0000000000..7352a7f0c5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 0000000000..6bd6748924 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// NetworkRemove removes an existent network from the docker host. 
+func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_remove_test.go b/vendor/github.com/docker/docker/client/network_remove_test.go new file mode 100644 index 0000000000..2a7b9640c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestNetworkRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNetworkRemove(t *testing.T) { + expectedURL := "/networks/network_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NetworkRemove(context.Background(), "network_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 0000000000..abf505d29c --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeInspectWithRaw returns the node information. 
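Finally, a sketch of the NodeInspectWithRaw call defined just below; the raw bytes are useful when a caller wants the daemon's exact JSON alongside the typed struct (the node ID is made up):

package sketch

import (
    "context"
    "fmt"

    "github.com/docker/docker/client"
)

// inspectNode returns the typed node plus the untouched JSON payload.
func inspectNode(ctx context.Context, cli *client.Client, id string) error {
    node, raw, err := cli.NodeInspectWithRaw(ctx, id)
    if err != nil {
        return err
    }
    fmt.Printf("node %s (%d bytes of raw JSON)\n", node.ID, len(raw))
    return nil
}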
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Node{}, nil, nodeNotFoundError{nodeID} + } + return swarm.Node{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_inspect_test.go b/vendor/github.com/docker/docker/client/node_inspect_test.go new file mode 100644 index 0000000000..fc13283084 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeInspectNodeNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.NodeInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrNodeNotFound(err) { + t.Fatalf("expected an nodeNotFoundError error, got %v", err) + } +} + +func TestNodeInspect(t *testing.T) { + expectedURL := "/nodes/node_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Node{ + ID: "node_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodeInspect, _, err := client.NodeInspectWithRaw(context.Background(), "node_id") + if err != nil { + t.Fatal(err) + } + if nodeInspect.ID != "node_id" { + t.Fatalf("expected `node_id`, got %s", nodeInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 0000000000..3e8440f08e --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeList returns the list of nodes. 
+func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + ensureReaderClosed(resp) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_list_test.go b/vendor/github.com/docker/docker/client/node_list_test.go new file mode 100644 index 0000000000..0251b5cce4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestNodeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.NodeList(context.Background(), types.NodeListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeList(t *testing.T) { + expectedURL := "/nodes" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.NodeListOptions + expectedQueryParams map[string]string + }{ + { + options: types.NodeListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.NodeListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Node{ + { + ID: "node_id1", + }, + { + ID: "node_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + nodes, err := client.NodeList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(nodes) != 2 { + t.Fatalf("expected 2 nodes, got %v", nodes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 0000000000..0a77f3d578 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +// NodeRemove removes a Node. 
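+// Usage sketch (cli assumed; Force asks the daemon to remove the node even
+// if it is not down):
+//
+//	err := cli.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: true})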
+func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_remove_test.go b/vendor/github.com/docker/docker/client/node_remove_test.go new file mode 100644 index 0000000000..f2f8adc4a3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove_test.go @@ -0,0 +1,69 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestNodeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: false}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeRemove(t *testing.T) { + expectedURL := "/nodes/node_id" + + removeCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, removeCase := range removeCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != removeCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", removeCase.expectedForce, force) + } + + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeRemove(context.Background(), "node_id", types.NodeRemoveOptions{Force: removeCase.force}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 0000000000..3ca9760282 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// NodeUpdate updates a Node. 
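+// The version argument provides optimistic locking, so a typical flow
+// (sketch; assumes swarm.Node exposes the Version and Spec fields seen at
+// inspect time, and the Availability assignment is illustrative) re-reads first:
+//
+//	node, _, err := cli.NodeInspectWithRaw(ctx, "node_id")
+//	if err == nil {
+//		node.Spec.Availability = swarm.NodeAvailabilityDrain
+//		err = cli.NodeUpdate(ctx, node.ID, node.Version, node.Spec)
+//	}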
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/node_update_test.go b/vendor/github.com/docker/docker/client/node_update_test.go new file mode 100644 index 0000000000..613ff104eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestNodeUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestNodeUpdate(t *testing.T) { + expectedURL := "/nodes/node_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.NodeUpdate(context.Background(), "node_id", swarm.Version{}, swarm.NodeSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 0000000000..22dcda24fd --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + + return ping, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 0000000000..a660ba5733 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,26 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/tar") + + query := url.Values{} + query.Set("name", 
createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 0000000000..30467db742 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable_test.go b/vendor/github.com/docker/docker/client/plugin_disable_test.go new file mode 100644 index 0000000000..a4de45be2d --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginDisableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginDisable(t *testing.T) { + expectedURL := "/plugins/plugin_name/disable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginDisable(context.Background(), "plugin_name", types.PluginDisableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 0000000000..95517c4b80 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable_test.go b/vendor/github.com/docker/docker/client/plugin_enable_test.go new file mode 100644 index 0000000000..b27681348f --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable_test.go @@ -0,0 +1,48 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + 
"strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginEnableError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginEnable(t *testing.T) { + expectedURL := "/plugins/plugin_name/enable" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginEnable(context.Background(), "plugin_name", types.PluginEnableOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 0000000000..89f39ee2c6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,32 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return nil, nil, pluginNotFoundError{name} + } + return nil, nil, err + } + + defer ensureReaderClosed(resp) + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect_test.go b/vendor/github.com/docker/docker/client/plugin_inspect_test.go new file mode 100644 index 0000000000..fae407eb9b --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.PluginInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginInspect(t *testing.T) { + expectedURL := "/plugins/plugin_name" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(types.Plugin{ + ID: "plugin_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: 
ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + pluginInspect, _, err := client.PluginInspectWithRaw(context.Background(), "plugin_name") + if err != nil { + t.Fatal(err) + } + if pluginInspect.ID != "plugin_id" { + t.Fatalf("expected `plugin_id`, got %s", pluginInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 0000000000..3217c4cf39 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client + +import ( + "encoding/json" + "io" + "net/http" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + err = cli.PluginEnable(ctx, name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(err) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := 
json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 0000000000..88c480a3e1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + resp, err := cli.get(ctx, "/plugins", nil, nil) + if err != nil { + return plugins, err + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + ensureReaderClosed(resp) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_list_test.go b/vendor/github.com/docker/docker/client/plugin_list_test.go new file mode 100644 index 0000000000..173e4b87f5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list_test.go @@ -0,0 +1,59 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestPluginListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.PluginList(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginList(t *testing.T) { + expectedURL := "/plugins" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal([]*types.Plugin{ + { + ID: "plugin_id1", + }, + { + ID: "plugin_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + plugins, err := client.PluginList(context.Background()) + if err != nil { + t.Fatal(err) + } + if len(plugins) != 2 { + t.Fatalf("expected 2 plugins, got %v", plugins) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 0000000000..1e5f963251 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,17 @@ +package client + +import ( + "io" + + "golang.org/x/net/context" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_push_test.go 
b/vendor/github.com/docker/docker/client/plugin_push_test.go
new file mode 100644
index 0000000000..d9f70cdff8
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_push_test.go
@@ -0,0 +1,51 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"golang.org/x/net/context"
+)
+
+func TestPluginPushError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	_, err := client.PluginPush(context.Background(), "plugin_name", "")
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestPluginPush(t *testing.T) {
+	expectedURL := "/plugins/plugin_name"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "POST" {
+				return nil, fmt.Errorf("expected POST method, got %s", req.Method)
+			}
+			auth := req.Header.Get("X-Registry-Auth")
+			if auth != "authtoken" {
+				return nil, fmt.Errorf("Invalid auth header : expected 'authtoken', got %s", auth)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
+				Body:       ioutil.NopCloser(bytes.NewReader([]byte(""))),
+			}, nil
+		}),
+	}
+
+	_, err := client.PluginPush(context.Background(), "plugin_name", "authtoken")
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go
new file mode 100644
index 0000000000..b017e4d348
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -0,0 +1,20 @@
+package client
+
+import (
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"golang.org/x/net/context"
+)
+
+// PluginRemove removes a plugin
+func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error {
+	query := url.Values{}
+	if options.Force {
+		query.Set("force", "1")
+	}
+
+	resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/plugin_remove_test.go b/vendor/github.com/docker/docker/client/plugin_remove_test.go
new file mode 100644
index 0000000000..a15f1661f6
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/plugin_remove_test.go
@@ -0,0 +1,49 @@
+package client
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+
+	"golang.org/x/net/context"
+)
+
+func TestPluginRemoveError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
+	}
+
+	err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{})
+	if err == nil || err.Error() != "Error response from daemon: Server error" {
+		t.Fatalf("expected a Server Error, got %v", err)
+	}
+}
+
+func TestPluginRemove(t *testing.T) {
+	expectedURL := "/plugins/plugin_name"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			if !strings.HasPrefix(req.URL.Path, expectedURL) {
+				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+			}
+			if req.Method != "DELETE" {
+				return nil, fmt.Errorf("expected DELETE method, got %s", req.Method)
+			}
+			return &http.Response{
+				StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginRemove(context.Background(), "plugin_name", types.PluginRemoveOptions{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 0000000000..3260d2a90d --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client + +import ( + "golang.org/x/net/context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_set_test.go b/vendor/github.com/docker/docker/client/plugin_set_test.go new file mode 100644 index 0000000000..2450254463 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestPluginSetError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestPluginSet(t *testing.T) { + expectedURL := "/plugins/plugin_name/set" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.PluginSet(context.Background(), "plugin_name", []string{"arg1"}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 0000000000..95a4356b97 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,37 @@ +package client + +import ( + "fmt" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" + "golang.org/x/net/context" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": 
{registryAuth}}
+	return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers)
+}
diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go
new file mode 100644
index 0000000000..ac05363655
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/request.go
@@ -0,0 +1,247 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strings"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/versions"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+// serverResponse is a wrapper for http API responses.
+type serverResponse struct {
+	body       io.ReadCloser
+	header     http.Header
+	statusCode int
+}
+
+// head sends an http request to the docker API using the method HEAD.
+func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+	return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
+}
+
+// get sends an http request to the docker API using the method GET with a specific go context.
+func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) {
+	return cli.sendRequest(ctx, "GET", path, query, nil, headers)
+}
+
+// post sends an http request to the docker API using the method POST with a specific go context.
+func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+	body, headers, err := encodeBody(obj, headers)
+	if err != nil {
+		return serverResponse{}, err
+	}
+	return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+	return cli.sendRequest(ctx, "POST", path, query, body, headers)
+}
+
+// put sends an http request to the docker API using the method PUT.
+func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) {
+	body, headers, err := encodeBody(obj, headers)
+	if err != nil {
+		return serverResponse{}, err
+	}
+	return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// putRaw sends an http request to the docker API using the method PUT, passing the body through unencoded.
+func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) {
+	return cli.sendRequest(ctx, "PUT", path, query, body, headers)
+}
+
+// delete sends an http request to the docker API using the method DELETE.
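+// As with the verb helpers above, callers are expected to run the returned
+// serverResponse through ensureReaderClosed (defined at the bottom of this
+// file) so the underlying connection can be reused.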
+func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == "POST" || method == "PUT") + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. (See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + return cli.doRequest(ctx, req) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1} + + resp, err := ctxhttp.Do(ctx, cli.client, req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err) + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + switch err { + case context.Canceled, context.DeadlineExceeded: + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. 
+ // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. + // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") + } + + return serverResp, errors.Wrap(err, "error during connect") + } + + if resp != nil { + serverResp.statusCode = resp.StatusCode + } + + if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return serverResp, err + } + if len(body) == 0 { + return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) + } + + var errorMessage string + if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && + resp.Header.Get("Content-Type") == "application/json" { + var errorResponse types.ErrorResponse + if err := json.Unmarshal(body, &errorResponse); err != nil { + return serverResp, fmt.Errorf("Error reading JSON: %v", err) + } + errorMessage = errorResponse.Message + } else { + errorMessage = string(body) + } + + return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) + } + + serverResp.body = resp.Body + serverResp.header = resp.Header + return serverResp, nil +} + +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { + // Add CLI Config's HTTP Headers BEFORE we set the Docker headers + // then the user can't change OUR headers + for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } + req.Header.Set(k, v) + } + + if headers != nil { + for k, v := range headers { + req.Header[k] = v + } + } + return req +} + +func encodeData(data interface{}) (*bytes.Buffer, error) { + params := bytes.NewBuffer(nil) + if data != nil { + if err := json.NewEncoder(params).Encode(data); err != nil { + return nil, err + } + } + return params, nil +} + +func ensureReaderClosed(response serverResponse) { + if body := response.body; body != nil { + // Drain up to 512 bytes and close the body to let the Transport reuse the connection + io.CopyN(ioutil.Discard, body, 512) + response.body.Close() + } +} diff --git a/vendor/github.com/docker/docker/client/request_test.go b/vendor/github.com/docker/docker/client/request_test.go new file mode 100644 index 0000000000..63908aec4b --- /dev/null +++ b/vendor/github.com/docker/docker/client/request_test.go @@ -0,0 +1,92 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// TestSetHostHeader should set fake host for local communications, set real host +// for normal communications. 
+func TestSetHostHeader(t *testing.T) { + testURL := "/test" + testCases := []struct { + host string + expectedHost string + expectedURLHost string + }{ + { + "unix:///var/run/docker.sock", + "docker", + "/var/run/docker.sock", + }, + { + "npipe:////./pipe/docker_engine", + "docker", + "//./pipe/docker_engine", + }, + { + "tcp://0.0.0.0:4243", + "", + "0.0.0.0:4243", + }, + { + "tcp://localhost:4243", + "", + "localhost:4243", + }, + } + + for c, test := range testCases { + proto, addr, basePath, err := ParseHost(test.host) + if err != nil { + t.Fatal(err) + } + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, testURL) { + return nil, fmt.Errorf("Test Case #%d: Expected URL %q, got %q", c, testURL, req.URL) + } + if req.Host != test.expectedHost { + return nil, fmt.Errorf("Test Case #%d: Expected host %q, got %q", c, test.expectedHost, req.Host) + } + if req.URL.Host != test.expectedURLHost { + return nil, fmt.Errorf("Test Case #%d: Expected URL host %q, got %q", c, test.expectedURLHost, req.URL.Host) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(([]byte("")))), + }, nil + }), + + proto: proto, + addr: addr, + basePath: basePath, + } + + _, err = client.sendRequest(context.Background(), "GET", testURL, nil, nil, nil) + if err != nil { + t.Fatal(err) + } + } +} + +// TestPlainTextError tests the server returning an error in plain text for +// backwards compatibility with API versions <1.24. All other tests use +// errors returned as JSON +func TestPlainTextError(t *testing.T) { + client := &Client{ + client: newMockClient(plainTextErrorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ContainerList(context.Background(), types.ContainerListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 0000000000..de8b041567 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. 
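+// Usage sketch (cli assumed to be a *Client; SecretSpec fields such as Name
+// and Data live in the swarm types package and are elided here):
+//
+//	resp, err := cli.SecretCreate(context.Background(), swarm.SecretSpec{ /* Name, Data, ... */ })
+//	// on success, resp.ID identifies the new secret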
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var headers map[string][]string + + var response types.SecretCreateResponse + resp, err := cli.post(ctx, "/secrets/create", nil, secret, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_create_test.go b/vendor/github.com/docker/docker/client/secret_create_test.go new file mode 100644 index 0000000000..cb378c77ff --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretCreate(t *testing.T) { + expectedURL := "/secrets/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.SecretCreateResponse{ + ID: "test_secret", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusCreated, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.SecretCreate(context.Background(), swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "test_secret" { + t.Fatalf("expected `test_secret`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 0000000000..f774576118 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect_test.go b/vendor/github.com/docker/docker/client/secret_inspect_test.go new file mode 100644 index 0000000000..423d986968 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect_test.go @@ -0,0 +1,65 @@ +package client + 
+import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretInspectSecretNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.SecretInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrSecretNotFound(err) { + t.Fatalf("expected an secretNotFoundError error, got %v", err) + } +} + +func TestSecretInspect(t *testing.T) { + expectedURL := "/secrets/secret_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Secret{ + ID: "secret_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secretInspect, _, err := client.SecretInspectWithRaw(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } + if secretInspect.ID != "secret_id" { + t.Fatalf("expected `secret_id`, got %s", secretInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 0000000000..7e9d5ec167 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. 
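+// Usage sketch (cli assumed; the label filter mirrors the test cases below):
+//
+//	f := filters.NewArgs()
+//	f.Add("label", "label1")
+//	secrets, err := cli.SecretList(context.Background(), types.SecretListOptions{Filters: f})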
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list_test.go b/vendor/github.com/docker/docker/client/secret_list_test.go new file mode 100644 index 0000000000..1ac11cddb3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSecretListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SecretList(context.Background(), types.SecretListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretList(t *testing.T) { + expectedURL := "/secrets" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.SecretListOptions + expectedQueryParams map[string]string + }{ + { + options: types.SecretListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.SecretListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Secret{ + { + ID: "secret_id1", + }, + { + ID: "secret_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + secrets, err := client.SecretList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(secrets) != 2 { + t.Fatalf("expected 2 secrets, got %v", secrets) + } + } +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 0000000000..1955b988a9 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. 
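+// Usage sketch (cli assumed; the ID is illustrative):
+//
+//	err := cli.SecretRemove(context.Background(), "secret_id")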
+func (cli *Client) SecretRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove_test.go b/vendor/github.com/docker/docker/client/secret_remove_test.go new file mode 100644 index 0000000000..f269f787d2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSecretRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretRemove(t *testing.T) { + expectedURL := "/secrets/secret_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretRemove(context.Background(), "secret_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 0000000000..b94e24aab0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,19 @@ +package client + +import ( + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretUpdate updates a Secret. Currently, the only part of a secret spec +// which can be updated is Labels. 
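+// Since only Labels may change, a typical flow (sketch; assumes swarm.Secret
+// exposes the Version and Spec returned by SecretInspectWithRaw) re-reads the
+// secret first:
+//
+//	secret, _, err := cli.SecretInspectWithRaw(ctx, "secret_id")
+//	if err == nil {
+//		secret.Spec.Labels = map[string]string{"tier": "backend"}
+//		err = cli.SecretUpdate(ctx, secret.ID, secret.Version, secret.Spec)
+//	}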
+func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/secret_update_test.go b/vendor/github.com/docker/docker/client/secret_update_test.go new file mode 100644 index 0000000000..c620985bd5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSecretUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSecretUpdate(t *testing.T) { + expectedURL := "/secrets/secret_id/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.SecretUpdate(context.Background(), "secret_id", swarm.Version{}, swarm.SecretSpec{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 0000000000..3d1be225bd --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,30 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceCreate creates a new Service. 
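+// Usage sketch (cli assumed; EncodedRegistryAuth is only needed when the
+// service image requires registry credentials):
+//
+//	resp, err := cli.ServiceCreate(context.Background(), swarm.ServiceSpec{ /* task spec, mode, ... */ }, types.ServiceCreateOptions{})
+//	// on success, resp.ID identifies the new service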
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var headers map[string][]string + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + var response types.ServiceCreateResponse + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/service_create_test.go b/vendor/github.com/docker/docker/client/service_create_test.go new file mode 100644 index 0000000000..1e07382870 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create_test.go @@ -0,0 +1,57 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceCreate(t *testing.T) { + expectedURL := "/services/create" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + b, err := json.Marshal(types.ServiceCreateResponse{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(b)), + }, nil + }), + } + + r, err := client.ServiceCreate(context.Background(), swarm.ServiceSpec{}, types.ServiceCreateOptions{}) + if err != nil { + t.Fatal(err) + } + if r.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", r.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go new file mode 100644 index 0000000000..ca71cbde1a --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect.go @@ -0,0 +1,33 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceInspectWithRaw returns the service information and the raw data. 
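+// Usage sketch (cli assumed; the ID is illustrative):
+//
+//	service, raw, err := cli.ServiceInspectWithRaw(context.Background(), "service_id")
+//	// service is the decoded swarm.Service, raw the undecoded JSON body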
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { + serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Service{}, nil, serviceNotFoundError{serviceID} + } + return swarm.Service{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_inspect_test.go b/vendor/github.com/docker/docker/client/service_inspect_test.go new file mode 100644 index 0000000000..e235cf0fef --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_inspect_test.go @@ -0,0 +1,65 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceInspectServiceNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown") + if err == nil || !IsErrServiceNotFound(err) { + t.Fatalf("expected an serviceNotFoundError error, got %v", err) + } +} + +func TestServiceInspect(t *testing.T) { + expectedURL := "/services/service_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Service{ + ID: "service_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + serviceInspect, _, err := client.ServiceInspectWithRaw(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } + if serviceInspect.ID != "service_id" { + t.Fatalf("expected `service_id`, got %s", serviceInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 0000000000..c29e6d407d --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceList returns the list of services. 
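A filtered-listing sketch for the implementation below, mirroring the filter encoding exercised in its test; the label value is a placeholder and ctx/cli come from the earlier sketch.

    f := filters.NewArgs()
    f.Add("label", "label1") // serialized into the "filters" query parameter
    services, err := cli.ServiceList(ctx, types.ServiceListOptions{Filters: f})
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range services {
        log.Println(s.ID)
    }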
+func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/services", query, nil) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + ensureReaderClosed(resp) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_list_test.go b/vendor/github.com/docker/docker/client/service_list_test.go new file mode 100644 index 0000000000..213981ef70 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestServiceListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceList(context.Background(), types.ServiceListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceList(t *testing.T) { + expectedURL := "/services" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.ServiceListOptions + expectedQueryParams map[string]string + }{ + { + options: types.ServiceListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.ServiceListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. 
Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Service{ + { + ID: "service_id1", + }, + { + ID: "service_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + services, err := client.ServiceList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(services) != 2 { + t.Fatalf("expected 2 services, got %v", services) + } + } +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 0000000000..24384e3ec0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_logs_test.go b/vendor/github.com/docker/docker/client/service_logs_test.go new file mode 100644 index 0000000000..a6d002ba75 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs_test.go @@ -0,0 +1,133 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "strings" + "testing" + "time" + + "github.com/docker/docker/api/types" + + "golang.org/x/net/context" +) + +func TestServiceLogsError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + _, err := client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } + _, err = client.ServiceLogs(context.Background(), "service_id", types.ContainerLogsOptions{ + Since: "2006-01-02TZ", + }) + if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) { + t.Fatalf("expected a 'parsing time' error, got %v", err) + } +} + +func TestServiceLogs(t *testing.T) { + expectedURL := "/services/service_id/logs" + cases := []struct { + options types.ContainerLogsOptions + expectedQueryParams map[string]string + }{ + { + expectedQueryParams: map[string]string{ + "tail": "", + }, + }, + { + options: types.ContainerLogsOptions{ + Tail: "any", + }, + expectedQueryParams: map[string]string{ + "tail": "any", + }, + }, + { + options: types.ContainerLogsOptions{ + ShowStdout: true, + ShowStderr: true, + Timestamps: true, + Details: true, 
+ Follow: true, + }, + expectedQueryParams: map[string]string{ + "tail": "", + "stdout": "1", + "stderr": "1", + "timestamps": "1", + "details": "1", + "follow": "1", + }, + }, + { + options: types.ContainerLogsOptions{ + // A completely invalid date, timestamp or Go duration will be + // passed as is + Since: "invalid but valid", + }, + expectedQueryParams: map[string]string{ + "tail": "", + "since": "invalid but valid", + }, + }, + } + for _, logCase := range cases { + client := &Client{ + client: newMockClient(func(r *http.Request) (*http.Response, error) { + if !strings.HasPrefix(r.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, r.URL) + } + // Check query parameters + query := r.URL.Query() + for key, expected := range logCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("response"))), + }, nil + }), + } + body, err := client.ServiceLogs(context.Background(), "service_id", logCase.options) + if err != nil { + t.Fatal(err) + } + defer body.Close() + content, err := ioutil.ReadAll(body) + if err != nil { + t.Fatal(err) + } + if string(content) != "response" { + t.Fatalf("expected response to contain 'response', got %s", string(content)) + } + } +} + +func ExampleClient_ServiceLogs_withTimeout() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + client, _ := NewEnvClient() + reader, err := client.ServiceLogs(ctx, "service_id", types.ContainerLogsOptions{}) + if err != nil { + log.Fatal(err) + } + + _, err = io.Copy(os.Stdout, reader) + if err != nil && err != io.EOF { + log.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 0000000000..a9331f92c2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// ServiceRemove kills and removes a service.
+func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_remove_test.go b/vendor/github.com/docker/docker/client/service_remove_test.go new file mode 100644 index 0000000000..8e2ac259c1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestServiceRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceRemove(t *testing.T) { + expectedURL := "/services/service_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.ServiceRemove(context.Background(), "service_id") + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 0000000000..afa94d47e2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,41 @@ +package client + +import ( + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// ServiceUpdate updates a Service. 
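Because the update endpoint below requires the current version index as a query parameter, a plausible flow is to inspect first and feed the returned version back. A sketch under the assumption that swarm.Service exposes Version (via its embedded meta) and Spec, with ctx, cli and the service ID as before:

    service, _, err := cli.ServiceInspectWithRaw(ctx, "service_id")
    if err != nil {
        log.Fatal(err)
    }
    // Mutate service.Spec as needed, then send it back with the version just read.
    _, err = cli.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{})
    if err != nil {
        log.Fatal(err)
    }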
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + headers map[string][]string + query = url.Values{} + ) + + if options.EncodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {options.EncodedRegistryAuth}, + } + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + var response types.ServiceUpdateResponse + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/service_update_test.go b/vendor/github.com/docker/docker/client/service_update_test.go new file mode 100644 index 0000000000..76bea176bf --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update_test.go @@ -0,0 +1,77 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +func TestServiceUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", swarm.Version{}, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestServiceUpdate(t *testing.T) { + expectedURL := "/services/service_id/update" + + updateCases := []struct { + swarmVersion swarm.Version + expectedVersion string + }{ + { + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 0, + }, + expectedVersion: "0", + }, + { + swarmVersion: swarm.Version{ + Index: 10, + }, + expectedVersion: "10", + }, + } + + for _, updateCase := range updateCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + version := req.URL.Query().Get("version") + if version != updateCase.expectedVersion { + return nil, fmt.Errorf("version not set in URL query properly, expected '%s', got %s", updateCase.expectedVersion, version) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("{}"))), + }, nil + }), + } + + _, err := client.ServiceUpdate(context.Background(), "service_id", updateCase.swarmVersion, swarm.ServiceSpec{}, types.ServiceUpdateOptions{}) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 0000000000..be28d32628 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. 
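The unlock-key call below pairs naturally with SwarmUnlock (added later in this patch). A sketch, assuming both the response type and swarm.UnlockRequest carry the key in an UnlockKey field:

    keyResp, err := cli.SwarmGetUnlockKey(ctx)
    if err != nil {
        log.Fatal(err)
    }
    // Feed the retrieved key back to unlock a locked swarm manager.
    if err := cli.SwarmUnlock(ctx, swarm.UnlockRequest{UnlockKey: keyResp.UnlockKey}); err != nil {
        log.Fatal(err)
    }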
+func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 0000000000..fd45d066e3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInit initializes the Swarm. +func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init_test.go b/vendor/github.com/docker/docker/client/swarm_init_test.go new file mode 100644 index 0000000000..811155aff4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmInitError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInit(context.Background(), swarm.InitRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInit(t *testing.T) { + expectedURL := "/swarm/init" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(`"body"`))), + }, nil + }), + } + + resp, err := client.SwarmInit(context.Background(), swarm.InitRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } + if resp != "body" { + t.Fatalf("Expected 'body', got %s", resp) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 0000000000..6d95cfc05e --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmInspect inspects the Swarm. 
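A sketch for the init call below; the listen address mirrors the test, and the decoded string should be the new node's ID per the Swarm API:

    nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{ListenAddr: "0.0.0.0:2377"})
    if err != nil {
        log.Fatal(err)
    }
    log.Println("initialized swarm, node ID:", nodeID)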
+func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect_test.go b/vendor/github.com/docker/docker/client/swarm_inspect_test.go new file mode 100644 index 0000000000..6432d172b4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect_test.go @@ -0,0 +1,56 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestSwarmInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.SwarmInspect(context.Background()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmInspect(t *testing.T) { + expectedURL := "/swarm" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Swarm{ + ClusterInfo: swarm.ClusterInfo{ + ID: "swarm_id", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + swarmInspect, err := client.SwarmInspect(context.Background()) + if err != nil { + t.Fatal(err) + } + if swarmInspect.ID != "swarm_id" { + t.Fatalf("expected `swarm_id`, got %s", swarmInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 0000000000..cda99930eb --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,13 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmJoin joins the Swarm. 
+func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join_test.go b/vendor/github.com/docker/docker/client/swarm_join_test.go new file mode 100644 index 0000000000..31ef2a76ee --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join_test.go @@ -0,0 +1,51 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmJoinError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmJoin(t *testing.T) { + expectedURL := "/swarm/join" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmJoin(context.Background(), swarm.JoinRequest{ + ListenAddr: "0.0.0.0:2377", + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 0000000000..a4df732174 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,18 @@ +package client + +import ( + "net/url" + + "golang.org/x/net/context" +) + +// SwarmLeave leaves the Swarm. 
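A sketch for the join call below; the manager address and token are placeholders, and RemoteAddrs/JoinToken are assumed fields of swarm.JoinRequest beyond the ListenAddr used in the test:

    err := cli.SwarmJoin(ctx, swarm.JoinRequest{
        ListenAddr:  "0.0.0.0:2377",
        RemoteAddrs: []string{"manager-host:2377"}, // placeholder manager address
        JoinToken:   "SWMTKN-...",                  // placeholder token from a manager
    })
    if err != nil {
        log.Fatal(err)
    }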
+func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave_test.go b/vendor/github.com/docker/docker/client/swarm_leave_test.go new file mode 100644 index 0000000000..c96dac8120 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave_test.go @@ -0,0 +1,66 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestSwarmLeaveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmLeave(context.Background(), false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmLeave(t *testing.T) { + expectedURL := "/swarm/leave" + + leaveCases := []struct { + force bool + expectedForce string + }{ + { + expectedForce: "", + }, + { + force: true, + expectedForce: "1", + }, + } + + for _, leaveCase := range leaveCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + force := req.URL.Query().Get("force") + if force != leaveCase.expectedForce { + return nil, fmt.Errorf("force not set in URL query properly. expected '%s', got %s", leaveCase.expectedForce, force) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmLeave(context.Background(), leaveCase.force) + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 0000000000..addfb59f0a --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlocks a locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + if err != nil { + return err + } + + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 0000000000..cc8eeb6554 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,22 @@ +package client + +import ( + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUpdate updates the Swarm.
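As with service updates, the swarm update below wants the current version index. A sketch that inspects first and rotates the worker join token; sw.Version and sw.Spec are assumed accessors on swarm.Swarm, while the RotateWorkerToken flag matches the query parameters set in the function body:

    sw, err := cli.SwarmInspect(ctx)
    if err != nil {
        log.Fatal(err)
    }
    err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{RotateWorkerToken: true})
    if err != nil {
        log.Fatal(err)
    }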
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) + query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) + resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update_test.go b/vendor/github.com/docker/docker/client/swarm_update_test.go new file mode 100644 index 0000000000..3b23db078f --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update_test.go @@ -0,0 +1,49 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types/swarm" +) + +func TestSwarmUpdateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestSwarmUpdate(t *testing.T) { + expectedURL := "/swarm/update" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte(""))), + }, nil + }), + } + + err := client.SwarmUpdate(context.Background(), swarm.Version{}, swarm.Spec{}, swarm.UpdateFlags{}) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go new file mode 100644 index 0000000000..bc8058fc32 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + + "golang.org/x/net/context" +) + +// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) + if err != nil { + if serverResp.statusCode == http.StatusNotFound { + return swarm.Task{}, nil, taskNotFoundError{taskID} + } + return swarm.Task{}, nil, err + } + defer ensureReaderClosed(serverResp) + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Task{}, nil, err + } + + var response swarm.Task + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/task_inspect_test.go b/vendor/github.com/docker/docker/client/task_inspect_test.go new file mode 100644 index 0000000000..148cdad3a7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_inspect_test.go @@ -0,0 +1,54 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, _, err := client.TaskInspectWithRaw(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskInspect(t *testing.T) { + expectedURL := "/tasks/task_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + content, err := json.Marshal(swarm.Task{ + ID: "task_id", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + taskInspect, _, err := client.TaskInspectWithRaw(context.Background(), "task_id") + if err != nil { + t.Fatal(err) + } + if taskInspect.ID != "task_id" { + t.Fatalf("expected `task_id`, got %s", taskInspect.ID) + } +} diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go new file mode 100644 index 0000000000..66324da959 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// TaskList returns the list of tasks. 
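A sketch for the task listing below; filtering by "service" is an assumption about the daemon's supported task filter keys (the test uses "label"), with ctx/cli as before:

    f := filters.NewArgs()
    f.Add("service", "service_id") // assumed filter key; placeholder service ID
    tasks, err := cli.TaskList(ctx, types.TaskListOptions{Filters: f})
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("%d tasks", len(tasks))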
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/tasks", query, nil) + if err != nil { + return nil, err + } + + var tasks []swarm.Task + err = json.NewDecoder(resp.body).Decode(&tasks) + ensureReaderClosed(resp) + return tasks, err +} diff --git a/vendor/github.com/docker/docker/client/task_list_test.go b/vendor/github.com/docker/docker/client/task_list_test.go new file mode 100644 index 0000000000..2a9a4c4346 --- /dev/null +++ b/vendor/github.com/docker/docker/client/task_list_test.go @@ -0,0 +1,94 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +func TestTaskListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.TaskList(context.Background(), types.TaskListOptions{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestTaskList(t *testing.T) { + expectedURL := "/tasks" + + filters := filters.NewArgs() + filters.Add("label", "label1") + filters.Add("label", "label2") + + listCases := []struct { + options types.TaskListOptions + expectedQueryParams map[string]string + }{ + { + options: types.TaskListOptions{}, + expectedQueryParams: map[string]string{ + "filters": "", + }, + }, + { + options: types.TaskListOptions{ + Filters: filters, + }, + expectedQueryParams: map[string]string{ + "filters": `{"label":{"label1":true,"label2":true}}`, + }, + }, + } + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + for key, expected := range listCase.expectedQueryParams { + actual := query.Get(key) + if actual != expected { + return nil, fmt.Errorf("%s not set in URL query properly. Expected '%s', got %s", key, expected, actual) + } + } + content, err := json.Marshal([]swarm.Task{ + { + ID: "task_id1", + }, + { + ID: "task_id2", + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + tasks, err := client.TaskList(context.Background(), listCase.options) + if err != nil { + t.Fatal(err) + } + if len(tasks) != 2 { + t.Fatalf("expected 2 tasks, got %v", tasks) + } + } +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 0000000000..f04e601649 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,28 @@ +package client + +import ( + "crypto/tls" + "errors" + "net/http" +) + +var errTLSConfigUnavailable = errors.New("TLSConfig unavailable") + +// transportFunc allows us to inject a mock transport for testing. We define it +// here so we can detect the tlsconfig and return nil for only this type. 
+type transportFunc func(*http.Request) (*http.Response, error) + +func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return tf(req) +} + +// resolveTLSConfig attempts to resolve the tls configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 0000000000..23d520ecb8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,33 @@ +package client + +import ( + "github.com/docker/docker/api/types/filters" + "net/url" + "regexp" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToParam(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 0000000000..933ceb4a49 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// ServerVersion returns information of the docker client and server host. +func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + ensureReaderClosed(resp) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 0000000000..9620c87cbf --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +// VolumeCreate creates a volume in the docker host. 
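A creation sketch for the implementation below, echoing the body its test sends; the name, driver and options are placeholders:

    vol, err := cli.VolumeCreate(ctx, volumetypes.VolumesCreateBody{
        Name:       "myvolume",
        Driver:     "local",
        DriverOpts: map[string]string{"opt-key": "opt-value"}, // placeholder options
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("volume mounted at", vol.Mountpoint)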
+func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + ensureReaderClosed(resp) + return volume, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create_test.go b/vendor/github.com/docker/docker/client/volume_create_test.go new file mode 100644 index 0000000000..9f1b2540b5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create_test.go @@ -0,0 +1,75 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeCreateError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{}) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeCreate(t *testing.T) { + expectedURL := "/volumes/create" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + + if req.Method != "POST" { + return nil, fmt.Errorf("expected POST method, got %s", req.Method) + } + + content, err := json.Marshal(types.Volume{ + Name: "volume", + Driver: "local", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volume, err := client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{ + Name: "myvolume", + Driver: "mydriver", + DriverOpts: map[string]string{ + "opt-key": "opt-value", + }, + }) + if err != nil { + t.Fatal(err) + } + if volume.Name != "volume" { + t.Fatalf("expected volume.Name to be 'volume', got %s", volume.Name) + } + if volume.Driver != "local" { + t.Fatalf("expected volume.Driver to be 'local', got %s", volume.Driver) + } + if volume.Mountpoint != "mountpoint" { + t.Fatalf("expected volume.Mountpoint to be 'mountpoint', got %s", volume.Mountpoint) + } +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 0000000000..3860e9b22c --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// VolumeInspect returns the information about a specific volume in the docker host. 
+func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return volume, nil, volumeNotFoundError{volumeID} + } + return volume, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect_test.go b/vendor/github.com/docker/docker/client/volume_inspect_test.go new file mode 100644 index 0000000000..0d1d118828 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect_test.go @@ -0,0 +1,76 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +func TestVolumeInspectError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "nothing") + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeInspectNotFound(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusNotFound, "Server error")), + } + + _, err := client.VolumeInspect(context.Background(), "unknown") + if err == nil || !IsErrVolumeNotFound(err) { + t.Fatalf("expected a volumeNotFound error, got %v", err) + } +} + +func TestVolumeInspect(t *testing.T) { + expectedURL := "/volumes/volume_id" + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "GET" { + return nil, fmt.Errorf("expected GET method, got %s", req.Method) + } + content, err := json.Marshal(types.Volume{ + Name: "name", + Driver: "driver", + Mountpoint: "mountpoint", + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + v, err := client.VolumeInspect(context.Background(), "volume_id") + if err != nil { + t.Fatal(err) + } + if v.Name != "name" { + t.Fatalf("expected `name`, got %s", v.Name) + } + if v.Driver != "driver" { + t.Fatalf("expected `driver`, got %s", v.Driver) + } + if v.Mountpoint != "mountpoint" { + t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint) + } +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 0000000000..32247ce115 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,32 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" 
+) + +// VolumeList returns the volumes configured in the docker host. +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody + query := url.Values{} + + if filter.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + ensureReaderClosed(resp) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list_test.go b/vendor/github.com/docker/docker/client/volume_list_test.go new file mode 100644 index 0000000000..f29639be23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list_test.go @@ -0,0 +1,98 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" + "golang.org/x/net/context" +) + +func TestVolumeListError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + _, err := client.VolumeList(context.Background(), filters.NewArgs()) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeList(t *testing.T) { + expectedURL := "/volumes" + + noDanglingFilters := filters.NewArgs() + noDanglingFilters.Add("dangling", "false") + + danglingFilters := filters.NewArgs() + danglingFilters.Add("dangling", "true") + + labelFilters := filters.NewArgs() + labelFilters.Add("label", "label1") + labelFilters.Add("label", "label2") + + listCases := []struct { + filters filters.Args + expectedFilters string + }{ + { + filters: filters.NewArgs(), + expectedFilters: "", + }, { + filters: noDanglingFilters, + expectedFilters: `{"dangling":{"false":true}}`, + }, { + filters: danglingFilters, + expectedFilters: `{"dangling":{"true":true}}`, + }, { + filters: labelFilters, + expectedFilters: `{"label":{"label1":true,"label2":true}}`, + }, + } + + for _, listCase := range listCases { + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + query := req.URL.Query() + actualFilters := query.Get("filters") + if actualFilters != listCase.expectedFilters { + return nil, fmt.Errorf("filters not set in URL query properly. 
Expected '%s', got %s", listCase.expectedFilters, actualFilters) + } + content, err := json.Marshal(volumetypes.VolumesListOKBody{ + Volumes: []*types.Volume{ + { + Name: "volume", + Driver: "local", + }, + }, + }) + if err != nil { + return nil, err + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader(content)), + }, nil + }), + } + + volumeResponse, err := client.VolumeList(context.Background(), listCase.filters) + if err != nil { + t.Fatal(err) + } + if len(volumeResponse.Volumes) != 1 { + t.Fatalf("expected 1 volume, got %v", volumeResponse.Volumes) + } + } +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 0000000000..a07e4ce637 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "golang.org/x/net/context" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 0000000000..6c26575b49 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client + +import ( + "net/url" + + "github.com/docker/docker/api/types/versions" + "golang.org/x/net/context" +) + +// VolumeRemove removes a volume from the docker host. 
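A removal sketch for the implementation below; note that force is only forwarded as force=1 when the client's negotiated API version is at least 1.25, and IsErrVolumeNotFound is the helper used by the inspect tests above:

    if err := cli.VolumeRemove(ctx, "volume_id", true); err != nil {
        if IsErrVolumeNotFound(err) {
            log.Println("volume already gone")
        } else {
            log.Fatal(err)
        }
    }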
+func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/volume_remove_test.go b/vendor/github.com/docker/docker/client/volume_remove_test.go new file mode 100644 index 0000000000..1fe657349a --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove_test.go @@ -0,0 +1,47 @@ +package client + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "golang.org/x/net/context" +) + +func TestVolumeRemoveError(t *testing.T) { + client := &Client{ + client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err == nil || err.Error() != "Error response from daemon: Server error" { + t.Fatalf("expected a Server Error, got %v", err) + } +} + +func TestVolumeRemove(t *testing.T) { + expectedURL := "/volumes/volume_id" + + client := &Client{ + client: newMockClient(func(req *http.Request) (*http.Response, error) { + if !strings.HasPrefix(req.URL.Path, expectedURL) { + return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL) + } + if req.Method != "DELETE" { + return nil, fmt.Errorf("expected DELETE method, got %s", req.Method) + } + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutil.NopCloser(bytes.NewReader([]byte("body"))), + }, nil + }), + } + + err := client.VolumeRemove(context.Background(), "volume_id", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/docker/docker/pkg/README.md b/vendor/github.com/docker/docker/pkg/README.md new file mode 100644 index 0000000000..c4b78a8ad8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/README.md @@ -0,0 +1,11 @@ +pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. + +Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. +If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the +Docker organization, to facilitate re-use by other projects. However that is not the priority. + +The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core +Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! + +Because utility packages are small and neatly separated from the rest of the codebase, they are a good +place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go new file mode 100644 index 0000000000..e4dec3a5d1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go @@ -0,0 +1,11 @@ +// +build go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. 
+func Clone(c *tls.Config) *tls.Config { + return c.Clone() +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go new file mode 100644 index 0000000000..0b816650ec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go @@ -0,0 +1,31 @@ +// +build go1.6,!go1.7 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.6 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go new file mode 100644 index 0000000000..0d5b448fec --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go @@ -0,0 +1,33 @@ +// +build go1.7,!go1.8 + +package tlsconfig + +import "crypto/tls" + +// Clone returns a clone of tls.Config. This function is provided for +// compatibility for go1.7 that doesn't include this method in stdlib. +func Clone(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/docker/docker/poule.yml b/vendor/github.com/docker/docker/poule.yml new file mode 100644 index 0000000000..61aab4551b --- /dev/null +++ b/vendor/github.com/docker/docker/poule.yml @@ -0,0 +1,88 @@ +# Add a "status/0-triage" to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign label based on matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. 
+- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker service", "docker swarm" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + } + } + - type: version-label + +# When a pull request is closed, attach it to the currently active milestone. +- triggers: + pull_request: [ closed ] + operations: + - type: version-milestone + +# Labeling a PR with `rebuild/<configuration>` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. +- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. + configurations: [], + label: "rebuild/*", + } + - type: rebuild + settings: { + configurations: [ arm ], + label: "rebuild/arm", + } + - type: rebuild + settings: { + configurations: [ experimental ], + label: "rebuild/experimental", + } + - type: rebuild + settings: { + configurations: [ janky ], + label: "rebuild/janky", + } + - type: rebuild + settings: { + configurations: [ userns ], + label: "rebuild/userns", + } + - type: rebuild + settings: { + configurations: [ vendor ], + label: "rebuild/vendor", + } + - type: rebuild + settings: { + configurations: [ win2lin ], + label: "rebuild/win2lin", + } + - type: rebuild + settings: { + configurations: [ windowsRS1 ], + label: "rebuild/windowsRS1", + } diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf new file mode 100644 index 0000000000..bb7718bc42 --- /dev/null +++ b/vendor/github.com/docker/docker/vendor.conf @@ -0,0 +1,140 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.9 +github.com/Microsoft/go-winio v0.3.8 +github.com/Sirupsen/logrus v0.11.0 +github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +# forked golang.org/x/net package includes a patch for lazy loading trace templates +golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git +golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 +github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 +github.com/docker/go-connections ecb4cb2dd420ada7df7f2593d6c25441f65f69f2 + +github.com/RackSec/srslog
456df3a81436d29ba874f3590eeeee25d666f8a5 +github.com/imdario/mergo 0.2.1 + +#get libnetwork packages +github.com/docker/libnetwork 45b40861e677e37cf27bc184eca5af92f8cdd32d +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv 1d8431073ae03cdaedb198a89722f3aab6d418ef +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink 482f7a52b758233521878cb6c5904b6bd63f3457 +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 +github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution 28602af35aceda2f8d571bad7ca37a54cf0250bc +github.com/vbatts/tar-split v0.10.1 + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +# get desired notary commit, might also need to be updated in Dockerfile +github.com/docker/notary v0.4.2 + +google.golang.org/grpc v1.0.2 +github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c + +# When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly +github.com/opencontainers/runc 9df8b306d01f59d3a8029be411de015b7304dd8f https://github.com/docker/runc.git # libcontainer +github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v4 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a + +# gelf logging driver deps +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 + +github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 + +# gcplogs deps +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 + +# native credentials 
+github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd + +# containerd +github.com/docker/containerd aa8187dbd3b7ad67d8e5e3a15115d3eef43a7ed1 +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 + +# cluster +github.com/docker/swarmkit 1c7f003d75f091d5f7051ed982594420e4515f77 +github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 +github.com/gogo/protobuf v0.3 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 +github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 +github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +# cli +github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git +github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff + +# metrics +github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 + +# composefile +github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a +github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/github.com/docker/go-connections/CONTRIBUTING.md b/vendor/github.com/docker/go-connections/CONTRIBUTING.md new file mode 100644 index 0000000000..926dcc931d --- /dev/null +++ b/vendor/github.com/docker/go-connections/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing to Docker + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 0000000000..b55b37bc31 --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-connections/MAINTAINERS b/vendor/github.com/docker/go-connections/MAINTAINERS new file mode 100644 index 0000000000..477be8b214 --- /dev/null +++ b/vendor/github.com/docker/go-connections/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/docker/go-connections/README.md b/vendor/github.com/docker/go-connections/README.md new file mode 100644 index 0000000000..d257e44fdc --- /dev/null +++ b/vendor/github.com/docker/go-connections/README.md @@ -0,0 +1,13 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-connections?status.svg)](https://godoc.org/github.com/docker/go-connections) + +# Introduction + +go-connections provides common package to work with network connections. 
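+ +For illustration only (editor's sketch; it uses the `nat` package vendored +later in this patch): + +```go +ports, bindings, err := nat.ParsePortSpecs([]string{"127.0.0.1:8080:80/tcp"}) +if err != nil { + panic(err) +} +_ = ports // container ports, e.g. "80/tcp" +_ = bindings // host bindings, e.g. 127.0.0.1:8080 -> 80/tcp +```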
+ +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-connections) for examples and documentation. + +## License + +go-connections is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/docker/go-connections/circle.yml b/vendor/github.com/docker/go-connections/circle.yml new file mode 100644 index 0000000000..8a82ee8259 --- /dev/null +++ b/vendor/github.com/docker/go-connections/circle.yml @@ -0,0 +1,14 @@ +dependencies: + pre: + # setup ipv6 + - sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 net.ipv6.conf.default.disable_ipv6=0 net.ipv6.conf.all.disable_ipv6=0 + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-connections/doc.go b/vendor/github.com/docker/go-connections/doc.go new file mode 100644 index 0000000000..43e27247d4 --- /dev/null +++ b/vendor/github.com/docker/go-connections/doc.go @@ -0,0 +1,3 @@ +// Package connections provides libraries to work with network connections. +// This library is divided in several components for specific usage. +package connections diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 0000000000..4d5f5ae63a --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. +package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. 
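+ // Illustrative results (editor's note): NewPort("tcp", "80") yields + // Port("80/tcp"); NewPort("udp", "8000-9000") yields Port("8000-9000/udp").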
+ + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + } + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func 
ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/nat_test.go b/vendor/github.com/docker/go-connections/nat/nat_test.go new file mode 100644 index 0000000000..787d5ac233 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat_test.go @@ -0,0 +1,583 @@ +package nat + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParsePort(t *testing.T) { + var ( + p int + err error + ) + + p, err = ParsePort("1234") + + if err != nil || p != 1234 { + t.Fatal("Parsing '1234' did not succeed") + } + + // FIXME currently this is a valid port. I don't think it should be. + // I'm leaving this test commented out until we make a decision. 
+ // - erikh + + /* + p, err = ParsePort("0123") + + if err != nil { + t.Fatal("Successfully parsed port '0123' to '123'") + } + */ + + p, err = ParsePort("asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port 'asdf' succeeded") + } + + p, err = ParsePort("1asdf") + + if err == nil || p != 0 { + t.Fatal("Parsing port '1asdf' succeeded") + } +} + +func TestParsePortRangeToInt(t *testing.T) { + var ( + begin int + end int + err error + ) + + type TestRange struct { + Range string + Begin int + End int + } + validRanges := []TestRange{ + {"1234", 1234, 1234}, + {"1234-1234", 1234, 1234}, + {"1234-1235", 1234, 1235}, + {"8000-9000", 8000, 9000}, + {"0", 0, 0}, + {"0-0", 0, 0}, + } + + for _, r := range validRanges { + begin, end, err = ParsePortRangeToInt(r.Range) + + if err != nil || begin != r.Begin { + t.Fatalf("Parsing port range '%s' did not succeed. Expected begin %d, got %d", r.Range, r.Begin, begin) + } + if err != nil || end != r.End { + t.Fatalf("Parsing port range '%s' did not succeed. Expected end %d, got %d", r.Range, r.End, end) + } + } + + invalidRanges := []string{ + "asdf", + "1asdf", + "9000-8000", + "9000-", + "-8000", + "-8000-", + } + + for _, r := range invalidRanges { + begin, end, err = ParsePortRangeToInt(r) + + if err == nil || begin != 0 || end != 0 { + t.Fatalf("Parsing port range '%s' succeeded", r) + } + } +} + +func TestPort(t *testing.T) { + p, err := NewPort("tcp", "1234") + + if err != nil { + t.Fatalf("tcp, 1234 had a parsing issue: %v", err) + } + + if string(p) != "1234/tcp" { + t.Fatal("tcp, 1234 did not result in the string 1234/tcp") + } + + if p.Proto() != "tcp" { + t.Fatal("protocol was not tcp") + } + + if p.Port() != "1234" { + t.Fatal("port string value was not 1234") + } + + if p.Int() != 1234 { + t.Fatal("port int value was not 1234") + } + + p, err = NewPort("tcp", "asd1234") + if err == nil { + t.Fatal("tcp, asd1234 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1230") + if err == nil { + t.Fatal("tcp, 1234-1230 was supposed to fail") + } + + p, err = NewPort("tcp", "1234-1242") + if err != nil { + t.Fatalf("tcp, 1234-1242 had a parsing issue: %v", err) + } + + if string(p) != "1234-1242/tcp" { + t.Fatal("tcp, 1234-1242 did not result in the string 1234-1242/tcp") + } +} + +func TestSplitProtoPort(t *testing.T) { + var ( + proto string + port string + ) + + proto, port = SplitProtoPort("1234/tcp") + + if proto != "tcp" || port != "1234" { + t.Fatal("Could not split 1234/tcp properly") + } + + proto, port = SplitProtoPort("") + + if proto != "" || port != "" { + t.Fatal("parsing an empty string yielded surprising results", proto, port) + } + + proto, port = SplitProtoPort("1234") + + if proto != "tcp" || port != "1234" { + t.Fatal("tcp is not the default protocol for portspec '1234'", proto, port) + } + + proto, port = SplitProtoPort("1234/") + + if proto != "tcp" || port != "1234" { + t.Fatal("parsing '1234/' yielded:" + port + "/" + proto) + } + + proto, port = SplitProtoPort("/tcp") + + if proto != "" || port != "" { + t.Fatal("parsing '/tcp' yielded:" + port + "/" + proto) + } +} + +func TestParsePortSpecFull(t *testing.T) { + portMappings, err := ParsePortSpec("0.0.0.0:1234-1235:3333-3334/tcp") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "3333/tcp", + Binding: PortBinding{ + HostIP: "0.0.0.0", + HostPort: "1234", + }, + }, + { + Port: "3334/tcp", + Binding: PortBinding{ + HostIP: "0.0.0.0", + HostPort: "1235", + }, + }, + } + + assert.Equal(t, expected, portMappings) +} + +func TestPartPortSpecIPV6(t 
*testing.T) { + portMappings, err := ParsePortSpec("[2001:4860:0:2001::68]::333") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "333/tcp", + Binding: PortBinding{ + HostIP: "2001:4860:0:2001::68", + HostPort: "", + }, + }, + } + assert.Equal(t, expected, portMappings) +} + +func TestPartPortSpecIPV6WithHostPort(t *testing.T) { + portMappings, err := ParsePortSpec("[::1]:80:80") + assert.Nil(t, err) + + expected := []PortMapping{ + { + Port: "80/tcp", + Binding: PortBinding{ + HostIP: "::1", + HostPort: "80", + }, + }, + } + assert.Equal(t, expected, portMappings) +} + +func TestParsePortSpecs(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234/tcp", "2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234:1234/tcp", "2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234:1234/tcp", "0.0.0.0:2345:2345/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1234/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2345/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "0.0.0.0" { + t.Fatalf("HostIP is not 0.0.0.0 for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234:1234/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParsePortSpecsWithRange(t *testing.T) { + var ( + portMap map[Port]struct{} + bindingMap map[Port][]PortBinding + err error + ) + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236/tcp", "2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok 
:= portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != "" { + t.Fatalf("HostPort should not be set for %s", portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"1234-1236:1234-1236/tcp", "2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 { + t.Fatalf("%s should have exactly one binding", portspec) + } + + if bindings[0].HostIP != "" { + t.Fatalf("HostIP should not be set for %s", portspec) + } + + if bindings[0].HostPort != port { + t.Fatalf("HostPort should be %s for %s", port, portspec) + } + } + + portMap, bindingMap, err = ParsePortSpecs([]string{"0.0.0.0:1234-1236:1234-1236/tcp", "0.0.0.0:2345-2347:2345-2347/udp"}) + + if err != nil { + t.Fatalf("Error while processing ParsePortSpecs: %s", err) + } + + if _, ok := portMap[Port("1235/tcp")]; !ok { + t.Fatal("1234/tcp was not parsed properly") + } + + if _, ok := portMap[Port("2346/udp")]; !ok { + t.Fatal("2345/udp was not parsed properly") + } + + for portspec, bindings := range bindingMap { + _, port := SplitProtoPort(string(portspec)) + if len(bindings) != 1 || bindings[0].HostIP != "0.0.0.0" || bindings[0].HostPort != port { + t.Fatalf("Expect single binding to port %s but found %s", port, bindings) + } + } + + _, _, err = ParsePortSpecs([]string{"localhost:1234-1236:1234-1236/tcp"}) + + if err == nil { + t.Fatal("Received no error while trying to parse a hostname instead of ip") + } +} + +func TestParseNetworkOptsPrivateOnly(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublic(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:8080:80"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "tcp" { + t.Logf("Expected tcp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "80" { + t.Logf("Expected 80 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + 
t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "8080" { + t.Logf("Expected 8080 got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} + +func TestParseNetworkOptsPublicNoPort(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100"}) + + if err == nil { + t.Logf("Expected error Invalid containerPort") + t.Fail() + } + if ports != nil { + t.Logf("Expected nil got %s", ports) + t.Fail() + } + if bindings != nil { + t.Logf("Expected nil got %s", bindings) + t.Fail() + } +} + +func TestParseNetworkOptsNegativePorts(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100:-1:-1"}) + + if err == nil { + t.Fail() + } + if len(ports) != 0 { + t.Logf("Expected nil got %d", len(ports)) + t.Fail() + } + if len(bindings) != 0 { + t.Logf("Expected 0 got %d", len(bindings)) + t.Fail() + } +} + +func TestParseNetworkOptsUdp(t *testing.T) { + ports, bindings, err := ParsePortSpecs([]string{"192.168.1.100::6000/udp"}) + if err != nil { + t.Fatal(err) + } + if len(ports) != 1 { + t.Logf("Expected 1 got %d", len(ports)) + t.FailNow() + } + if len(bindings) != 1 { + t.Logf("Expected 1 got %d", len(bindings)) + t.FailNow() + } + for k := range ports { + if k.Proto() != "udp" { + t.Logf("Expected udp got %s", k.Proto()) + t.Fail() + } + if k.Port() != "6000" { + t.Logf("Expected 6000 got %s", k.Port()) + t.Fail() + } + b, exists := bindings[k] + if !exists { + t.Log("Binding does not exist") + t.FailNow() + } + if len(b) != 1 { + t.Logf("Expected 1 got %d", len(b)) + t.FailNow() + } + s := b[0] + if s.HostPort != "" { + t.Logf("Expected \"\" got %s", s.HostPort) + t.Fail() + } + if s.HostIP != "192.168.1.100" { + t.Fail() + } + } +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 0000000000..892adf8c66 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse_test.go b/vendor/github.com/docker/go-connections/nat/parse_test.go new file mode 100644 index 0000000000..2ac204a05b --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse_test.go @@ -0,0 +1,54 @@ +package nat + +import ( + "strings" + "testing" +) + +func TestParsePortRange(t *testing.T) { + if start, end, err := ParsePortRange("8000-8080"); err != nil || start != 8000 || end != 8080 { + t.Fatalf("Error: %s or Expecting {start,end} values {8000,8080} but found {%d,%d}.", err, start, end) + } +} + +func TestParsePortRangeEmpty(t *testing.T) { + if _, _, err := ParsePortRange(""); err == nil || err.Error() != "Empty string specified for ports." { + t.Fatalf("Expected error 'Empty string specified for ports.', got %v", err) + } +} + +func TestParsePortRangeWithNoRange(t *testing.T) { + start, end, err := ParsePortRange("8080") + if err != nil { + t.Fatal(err) + } + if start != 8080 || end != 8080 { + t.Fatalf("Expected start and end to be the same and equal to 8080, but were %v and %v", start, end) + } +} + +func TestParsePortRangeIncorrectRange(t *testing.T) { + if _, _, err := ParsePortRange("9000-8080"); err == nil || !strings.Contains(err.Error(), "Invalid range specified for the Port") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectEndRange(t *testing.T) { + if _, _, err := ParsePortRange("8000-a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("8000-30a"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} + +func TestParsePortRangeIncorrectStartRange(t *testing.T) { + if _, _, err := ParsePortRange("a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } + + if _, _, err := ParsePortRange("30a-8000"); err == nil || !strings.Contains(err.Error(), "invalid syntax") { + t.Fatalf("Expecting error 'Invalid range specified for the Port' but received %s.", err) + } +} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 0000000000..ce950171e3 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) 
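+ +// Editor's note: portSorter below implements sort.Interface around a +// caller-supplied comparator, so any ordering can be plugged in, e.g. +// +// Sort(ports, func(i, j Port) bool { return i.Int() < j.Int() })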
+ +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the ports so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respective mapping. The ports +// with explicit HostPort will be placed first. +func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/github.com/docker/go-connections/nat/sort_test.go b/vendor/github.com/docker/go-connections/nat/sort_test.go new file mode 100644 index 0000000000..88ed911156 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort_test.go @@ -0,0 +1,85 @@ +package nat + +import ( + "fmt" + "reflect" + "testing" +) + +func TestSortUniquePorts(t *testing.T) { + ports := []Port{ + Port("6379/tcp"), + Port("22/tcp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "22/tcp" { + t.Log(fmt.Sprint(first)) + t.Fail() + } +} + +func TestSortSamePortWithDifferentProto(t *testing.T) { + ports := []Port{ + Port("8888/tcp"), + Port("8888/udp"), + Port("6379/tcp"), + Port("6379/udp"), + } + + Sort(ports, func(ip, jp Port) bool { + return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && ip.Proto() == "tcp") + }) + + first := ports[0] + if fmt.Sprint(first) != "6379/tcp" { + t.Fail() + } +} + +func TestSortPortMap(t *testing.T) { + ports := []Port{ + Port("22/tcp"), + Port("22/udp"), + Port("8000/tcp"), + Port("6379/tcp"), + Port("9999/tcp"), + } + + portMap := PortMap{ + Port("22/tcp"): []PortBinding{ + {}, + }, + Port("8000/tcp"): []PortBinding{ + {}, + }, + Port("6379/tcp"): []PortBinding{ + {}, + {HostIP: "0.0.0.0", HostPort:
"32749"}, + }, + Port("9999/tcp"): []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "40000"}, + }, + } + + SortPortMap(ports, portMap) + if !reflect.DeepEqual(ports, []Port{ + Port("9999/tcp"), + Port("6379/tcp"), + Port("8000/tcp"), + Port("22/tcp"), + Port("22/udp"), + }) { + t.Errorf("failed to prioritize port with explicit mappings, got %v", ports) + } + if pm := portMap[Port("6379/tcp")]; !reflect.DeepEqual(pm, []PortBinding{ + {HostIP: "0.0.0.0", HostPort: "32749"}, + {}, + }) { + t.Errorf("failed to prioritize bindings with explicit mappings, got %v", pm) + } +} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 0000000000..99846ffddb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go new file mode 100644 index 0000000000..24dc1d1019 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket_test.go @@ -0,0 +1,39 @@ +package sockets + +import "testing" + +func TestInmemSocket(t *testing.T) { + l := NewInmemSocket("test", 0) + defer l.Close() + go func() { + for { + conn, err := l.Accept() + if err != nil { + return + } + conn.Write([]byte("hello")) + conn.Close() + } + }() + + conn, err := l.Dial("test", "test") + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 5) + _, err = conn.Read(buf) + if err != nil { + t.Fatal(err) + } + + if string(buf) != "hello" { + t.Fatalf("expected `hello`, got %s", string(buf)) + } + + l.Close() + conn, err = l.Dial("test", "test") + if err != errClosed { + t.Fatalf("expected `errClosed` error, got %v", err) + } +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 0000000000..98e9a1dc61 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 0000000000..a1d7beb4d8 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. 
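+// +// A minimal usage sketch (editor's illustration; error handling elided): +// +// tr := &http.Transport{} +// _ = ConfigureTransport(tr, "unix", "/var/run/docker.sock")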
+package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. +func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 0000000000..386cf0dbbd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 0000000000..5c21644e1f --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 0000000000..53cbb6c79e --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. 
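
A hedged usage sketch for ConfigureTransport; the socket path and endpoint are placeholders (the Docker daemon socket is just a familiar example, not something this package requires).

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}

	// For "unix" (and "npipe" on Windows) this disables compression and
	// installs a Dial function bound to the given address; for anything
	// else it wires up proxy handling and a 32-second-timeout dialer.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}

	client := &http.Client{Transport: tr}
	// With a unix transport the host in the URL is ignored by the dialer.
	resp, err := client.Get("http://localhost/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```
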
+package sockets
+
+import (
+	"crypto/tls"
+	"net"
+)
+
+// NewTCPSocket creates a TCP socket listener with the specified address and
+// the specified TLS configuration. If tlsConfig is set, it encapsulates the
+// TCP listener inside a TLS one.
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) {
+	l, err := net.Listen("tcp", addr)
+	if err != nil {
+		return nil, err
+	}
+	if tlsConfig != nil {
+		tlsConfig.NextProtos = []string{"http/1.1"}
+		l = tls.NewListener(l, tlsConfig)
+	}
+	return l, nil
+}
diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
new file mode 100644
index 0000000000..a8b5dbb6fd
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go
@@ -0,0 +1,32 @@
+// +build !windows
+
+package sockets
+
+import (
+	"net"
+	"os"
+	"syscall"
+)
+
+// NewUnixSocket creates a unix socket with the specified path and group.
+func NewUnixSocket(path string, gid int) (net.Listener, error) {
+	if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	mask := syscall.Umask(0777)
+	defer syscall.Umask(mask)
+
+	l, err := net.Listen("unix", path)
+	if err != nil {
+		return nil, err
+	}
+	if err := os.Chown(path, 0, gid); err != nil {
+		l.Close()
+		return nil, err
+	}
+	if err := os.Chmod(path, 0660); err != nil {
+		l.Close()
+		return nil, err
+	}
+	return l, nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 0000000000..1ca0965e06
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+	"runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool; on Windows it
+// returns a new empty pool instead of an error when loading fails.
+func SystemCertPool() (*x509.CertPool, error) {
+	certpool, err := x509.SystemCertPool()
+	if err != nil && runtime.GOOS == "windows" {
+		return x509.NewCertPool(), nil
+	}
+	return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 0000000000..9ca974539a
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool requires go 1.7 or later.
+func SystemCertPool() (*x509.CertPool, error) {
+	return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 0000000000..1b31bbb8b1
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,244 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
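
An illustrative sketch combining the two listener constructors above; the address, path, and gid are placeholders, and the chown done by NewUnixSocket generally requires root.

```go
package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	// Plain TCP listener; passing a *tls.Config instead of nil would
	// wrap it in a TLS listener advertising http/1.1.
	tcpL, err := sockets.NewTCPSocket("127.0.0.1:2375", nil)
	if err != nil {
		panic(err)
	}

	// Unix socket owned by root:<gid>, mode 0660; any stale socket
	// file at the path is unlinked first.
	unixL, err := sockets.NewUnixSocket("/tmp/example.sock", 0)
	if err != nil {
		panic(err)
	}

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})
	go http.Serve(unixL, handler)
	panic(http.Serve(tcpL, handler))
}
```
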
+package tlsconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+	CAFile string
+
+	// If either CertFile or KeyFile is empty, Client() will not load them,
+	// preventing the client from authenticating to the server.
+	// However, Server() requires them and will error out if they are empty.
+	CertFile string
+	KeyFile  string
+
+	// client-only option
+	InsecureSkipVerify bool
+	// server-only option
+	ClientAuth tls.ClientAuthType
+	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+	// creds will include exclusively the roots in that CA file. If no CA file is provided,
+	// the system pool will be used.
+	ExclusiveRootPools bool
+	MinVersion         uint16
+	// If Passphrase is set, it will be used to decrypt a TLS private key
+	// if the key is encrypted
+	Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - to be phased out in the future
+var acceptedCBCCiphers = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+	tls.VersionSSL30: {},
+	tls.VersionTLS10: {},
+	tls.VersionTLS11: {},
+	tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for the server.
+func ServerDefault() *tls.Config {
+	return &tls.Config{
+		// Avoid fallback to SSL protocols < TLS1.0
+		MinVersion:               tls.VersionTLS10,
+		PreferServerCipherSuites: true,
+		CipherSuites:             DefaultServerAcceptedCiphers,
+	}
+}
+
+// ClientDefault returns a secure-enough TLS configuration for the client.
+func ClientDefault() *tls.Config {
+	return &tls.Config{
+		// Prefer TLS1.2 as the client minimum
+		MinVersion:   tls.VersionTLS12,
+		CipherSuites: clientCipherSuites,
+	}
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
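
A small sketch contrasting the two defaults just defined; nothing here is project-specific. The server floor is TLS 1.0 with the extra CBC suites accepted, while the client insists on TLS 1.2 and the GCM-only list.

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	srv := tlsconfig.ServerDefault()
	cli := tlsconfig.ClientDefault()

	fmt.Println(srv.MinVersion == tls.VersionTLS10) // true
	fmt.Println(cli.MinVersion == tls.VersionTLS12) // true
	// The server list is the client list plus the accepted CBC suites.
	fmt.Println(len(srv.CipherSuites) > len(cli.CipherSuites)) // true
}
```
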
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+	// If we should verify the server, we need to load a trusted ca
+	var (
+		certPool *x509.CertPool
+		err      error
+	)
+	if exclusivePool {
+		certPool = x509.NewCertPool()
+	} else {
+		certPool, err = SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("failed to read system certificates: %v", err)
+		}
+	}
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+	}
+	if !certPool.AppendCertsFromPEM(pem) {
+		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+	}
+	return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid TLS minimum version
+func isValidMinVersion(version uint16) bool {
+	_, ok := allTLSVersions[version]
+	return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
+func adjustMinVersion(options Options, config *tls.Config) error {
+	if options.MinVersion > 0 {
+		if !isValidMinVersion(options.MinVersion) {
+			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+		}
+		if options.MinVersion < config.MinVersion {
+			return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+		}
+		config.MinVersion = options.MinVersion
+	}
+
+	return nil
+}
+
+// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
+// password when trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+	return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+	// this section makes some small changes to code from notary/tuf/utils/x509.go
+	pemBlock, _ := pem.Decode(keyBytes)
+	if pemBlock == nil {
+		return nil, fmt.Errorf("no valid private key found")
+	}
+
+	var err error
+	if x509.IsEncryptedPEMBlock(pemBlock) {
+		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+		if err != nil {
+			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+		}
+		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+	}
+
+	return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options';
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+	if options.CertFile == "" && options.KeyFile == "" {
+		return nil, nil
+	}
+
+	errMessage := "Could not load X509 key pair"
+
+	cert, err := ioutil.ReadFile(options.CertFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
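
getPrivateKey above is unexported; the standalone sketch below mirrors the same decode, decrypt, re-encode steps using only the standard library. The key path and passphrase are placeholders.

```go
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
)

// decryptPEMKey mirrors the getPrivateKey logic: decode the PEM block,
// decrypt it with the passphrase when it is encrypted, and re-encode
// the plaintext DER bytes as PEM.
func decryptPEMKey(keyBytes []byte, passphrase string) ([]byte, error) {
	block, _ := pem.Decode(keyBytes)
	if block == nil {
		return nil, fmt.Errorf("no valid private key found")
	}
	if !x509.IsEncryptedPEMBlock(block) {
		return keyBytes, nil // already plaintext
	}
	der, err := x509.DecryptPEMBlock(block, []byte(passphrase))
	if err != nil {
		return nil, err // a wrong passphrase surfaces here
	}
	return pem.EncodeToMemory(&pem.Block{Type: block.Type, Bytes: der}), nil
}

func main() {
	raw, err := ioutil.ReadFile("encrypted_key.pem") // placeholder path
	if err != nil {
		panic(err)
	}
	key, err := decryptPEMKey(raw, "FooBar123") // placeholder passphrase
	if err != nil {
		panic(err)
	}
	fmt.Printf("decrypted key: %d bytes of PEM\n", len(key))
}
```
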
+func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 0000000000..6b4c6a7c0d --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 0000000000..ee22df47cb --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
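
A hypothetical end-to-end sketch of Client and Server from this package; all file names are placeholders and mutual TLS is just one possible configuration.

```go
package main

import (
	"crypto/tls"
	"net/http"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	// Client side: verify the server against a private CA and present
	// a client certificate.
	clientCfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "ca.pem",
		CertFile: "client-cert.pem",
		KeyFile:  "client-key.pem",
	})
	if err != nil {
		panic(err)
	}
	httpClient := &http.Client{
		Transport: &http.Transport{TLSClientConfig: clientCfg},
	}
	_ = httpClient

	// Server side: require and verify client certificates from the same CA.
	serverCfg, err := tlsconfig.Server(tlsconfig.Options{
		CertFile:   "server-cert.pem",
		KeyFile:    "server-key.pem",
		CAFile:     "ca.pem",
		ClientAuth: tls.RequireAndVerifyClientCert,
	})
	if err != nil {
		panic(err)
	}
	_ = serverCfg
}
```
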
+// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_test.go b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go new file mode 100644 index 0000000000..02131d6b8c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_test.go @@ -0,0 +1,651 @@ +package tlsconfig + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "io/ioutil" + "os" + "reflect" + "testing" +) + +// This is the currently active LetsEncrypt IdenTrust cross-signed CA cert. It expires Mar 17, 2021. +const ( + systemRootTrustedCert = ` +-----BEGIN CERTIFICATE----- +MIIEkjCCA3qgAwIBAgIQCgFBQgAAAVOFc2oLheynCDANBgkqhkiG9w0BAQsFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTE2MDMxNzE2NDA0NloXDTIxMDMxNzE2NDA0Nlow +SjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUxldCdzIEVuY3J5cHQxIzAhBgNVBAMT +GkxldCdzIEVuY3J5cHQgQXV0aG9yaXR5IFgzMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEAnNMM8FrlLke3cl03g7NoYzDq1zUmGSXhvb418XCSL7e4S0EF +q6meNQhY7LEqxGiHC6PjdeTm86dicbp5gWAf15Gan/PQeGdxyGkOlZHP/uaZ6WA8 +SMx+yk13EiSdRxta67nsHjcAHJyse6cF6s5K671B5TaYucv9bTyWaN8jKkKQDIZ0 +Z8h/pZq4UmEUEz9l6YKHy9v6Dlb2honzhT+Xhq+w3Brvaw2VFn3EK6BlspkENnWA +a6xK8xuQSXgvopZPKiAlKQTGdMDQMc2PMTiVFrqoM7hD8bEfwzB/onkxEz0tNvjj +/PIzark5McWvxI0NHWQWM6r6hCm21AvA2H3DkwIDAQABo4IBfTCCAXkwEgYDVR0T +AQH/BAgwBgEB/wIBADAOBgNVHQ8BAf8EBAMCAYYwfwYIKwYBBQUHAQEEczBxMDIG +CCsGAQUFBzABhiZodHRwOi8vaXNyZy50cnVzdGlkLm9jc3AuaWRlbnRydXN0LmNv +bTA7BggrBgEFBQcwAoYvaHR0cDovL2FwcHMuaWRlbnRydXN0LmNvbS9yb290cy9k +c3Ryb290Y2F4My5wN2MwHwYDVR0jBBgwFoAUxKexpHsscfrb4UuQdf/EFWCFiRAw +VAYDVR0gBE0wSzAIBgZngQwBAgEwPwYLKwYBBAGC3xMBAQEwMDAuBggrBgEFBQcC +ARYiaHR0cDovL2Nwcy5yb290LXgxLmxldHNlbmNyeXB0Lm9yZzA8BgNVHR8ENTAz +MDGgL6AthitodHRwOi8vY3JsLmlkZW50cnVzdC5jb20vRFNUUk9PVENBWDNDUkwu +Y3JsMB0GA1UdDgQWBBSoSmpjBH3duubRObemRWXv86jsoTANBgkqhkiG9w0BAQsF +AAOCAQEA3TPXEfNjWDjdGBX7CVW+dla5cEilaUcne8IkCJLxWh9KEik3JHRRHGJo +uM2VcGfl96S8TihRzZvoroed6ti6WqEBmtzw3Wodatg+VyOeph4EYpr/1wXKtx8/ +wApIvJSwtmVi4MFU5aMqrSDE6ea73Mj2tcMyo5jMd6jmeWUHK8so/joWUoHOUgwu +X4Po1QYz+3dszkDqMp4fklxBwXRsW10KXzPMTZ+sOPAveyxindmjkW8lGy+QsRlG +PfZ+G6Z6h7mjem0Y+iWlkYcV4PIWL1iwBi8saCbGS5jN2p8M+X+Q7UNKEkROb3N6 +KOqkqm57TH2H3eDJAkSnh6/DNFu0Qg== +-----END CERTIFICATE----- +` + rsaPrivateKeyFile = "fixtures/key.pem" + certificateFile = "fixtures/cert.pem" + multiCertificateFile = "fixtures/multi.pem" + rsaEncryptedPrivateKeyFile = "fixtures/encrypted_key.pem" + certificateOfEncryptedKeyFile = "fixtures/cert_of_encrypted_key.pem" +) + +// returns the name of a pre-generated, multiple-certificate CA file +// with both RSA and ECDSA certs. +func getMultiCert() string { + return multiCertificateFile +} + +// returns the names of pre-generated key and certificate files. +func getCertAndKey() (string, string) { + return rsaPrivateKeyFile, certificateFile +} + +// returns the names of pre-generated, encrypted private key and +// corresponding certificate file +func getCertAndEncryptedKey() (string, string) { + return rsaEncryptedPrivateKeyFile, certificateOfEncryptedKeyFile +} + +// If the cert files and directory are provided but are invalid, an error is +// returned. 
+func TestConfigServerTLSFailsIfUnableToLoadCerts(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + tempFile, err := ioutil.TempFile("", "cert-test") + if err != nil { + t.Fatal("Unable to create temporary empty file") + } + defer os.RemoveAll(tempFile.Name()) + tempFile.Close() + + for _, badFile := range []string{"not-a-file", tempFile.Name()} { + for i := 0; i < 3; i++ { + files := []string{cert, key, ca} + files[i] = badFile + + result, err := Server(Options{ + CertFile: files[0], + KeyFile: files[1], + CAFile: files[2], + ClientAuth: tls.VerifyClientCertIfGiven, + }) + if err == nil || result != nil { + t.Fatal("Expected a non-real file to error and return a nil TLS config") + } + } + } +} + +// If server cert and key are provided and client auth and client CA are not +// set, a tls config with only the server certs will be returned. +func TestConfigServerTLSServerCertsOnly(t *testing.T) { + key, cert := getCertAndKey() + + keypair, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + t.Fatal("Unable to load the generated cert and key") + } + + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + }) + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected server certificates") + } + if len(tlsConfig.Certificates[0].Certificate) != len(keypair.Certificate) { + t.Fatal("Unexpected server certificates") + } + for i, cert := range tlsConfig.Certificates[0].Certificate { + if !bytes.Equal(cert, keypair.Certificate[i]) { + t.Fatal("Unexpected server certificates") + } + } + + if !reflect.DeepEqual(tlsConfig.CipherSuites, DefaultServerAcceptedCiphers) { + t.Fatal("Unexpected server cipher suites") + } + if !tlsConfig.PreferServerCipherSuites { + t.Fatal("Expected server to prefer cipher suites") + } + if tlsConfig.MinVersion != tls.VersionTLS10 { + t.Fatal("Unexpected server TLS version") + } +} + +// If client CA is provided, it will only be used if the client auth is >= +// VerifyClientCertIfGiven +func TestConfigServerTLSClientCANotSetIfClientAuthTooLow(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.RequestClientCert, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected server certificates") + } + if tlsConfig.ClientAuth != tls.RequestClientCert { + t.Fatal("ClientAuth was not set to what was in the options") + } + if tlsConfig.ClientCAs != nil { + t.Fatalf("Client CAs should never have been set") + } +} + +// If client CA is provided, it will only be used if the client auth is >= +// VerifyClientCertIfGiven +func TestConfigServerTLSClientCASet(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected server certificates") + } + if tlsConfig.ClientAuth != tls.VerifyClientCertIfGiven { + t.Fatal("ClientAuth was not set to what was in the options") + } + basePool, err := SystemCertPool() + if err != nil { + basePool = x509.NewCertPool() + } + // because we are not enabling `ExclusiveRootPools`, any root pool will also 
contain the system roots + if tlsConfig.ClientCAs == nil || len(tlsConfig.ClientCAs.Subjects()) != len(basePool.Subjects())+2 { + t.Fatalf("Client CAs were never set correctly") + } +} + +// Exclusive root pools determines whether the CA pool will be a union of the system +// certificate pool and custom certs, or an exclusive or of the custom certs and system pool +func TestConfigServerExclusiveRootPools(t *testing.T) { + key, cert := getCertAndKey() + ca := getMultiCert() + + caBytes, err := ioutil.ReadFile(ca) + if err != nil { + t.Fatal("Unable to read CA certs", err) + } + + var testCerts []*x509.Certificate + for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { + pemBlock, _ := pem.Decode(pemBytes) + if pemBlock == nil { + t.Fatal("Malformed certificate") + } + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatal("Unable to parse certificate") + } + testCerts = append(testCerts, cert) + } + + // ExclusiveRootPools not set, so should be able to verify both system-signed certs + // and custom CA-signed certs + tlsConfig, err := Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}); err != nil { + t.Fatalf("Unable to verify certificate %d: %v", i, err) + } + } + + // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable + // and custom CA-signed certs should be verifiable + tlsConfig, err = Server(Options{ + CertFile: cert, + KeyFile: key, + ClientAuth: tls.VerifyClientCertIfGiven, + CAFile: ca, + ExclusiveRootPools: true, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) + switch { + case i == 0 && err != nil: + t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) + case i == 1 && err == nil: + t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) + } + } + + // No CA file provided, system cert should be verifiable only + tlsConfig, err = Server(Options{ + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.ClientCAs}) + switch { + case i == 1 && err != nil: + t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) + case i == 0 && err == nil: + t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) + } + } +} + +// If a valid minimum version is specified in the options, the server's +// minimum version should be set accordingly +func TestConfigServerTLSMinVersionIsSetBasedOnOptions(t *testing.T) { + versions := []uint16{ + tls.VersionTLS11, + tls.VersionTLS12, + } + key, cert := getCertAndKey() + + for _, v := range versions { + tlsConfig, err := Server(Options{ + MinVersion: v, + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure server TLS", err) + } + + if tlsConfig.MinVersion != v { + t.Fatal("Unexpected minimum TLS 
version: ", tlsConfig.MinVersion) + } + } +} + +// An error should be returned if the specified minimum version for the server +// is too low, i.e. less than VersionTLS10 +func TestConfigServerTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Server(Options{ + MinVersion: tls.VersionSSL30, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned an error for minimum version below TLS10") + } +} + +// An error should be returned if an invalid minimum version for the server is +// in the options struct +func TestConfigServerTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Server(Options{ + MinVersion: 1, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned error on invalid minimum version option") + } +} + +// The root CA is never set if InsecureSkipBoolean is set to true, but the +// default client options are set +func TestConfigClientTLSNoVerify(t *testing.T) { + ca := getMultiCert() + + tlsConfig, err := Client(Options{CAFile: ca, InsecureSkipVerify: true}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.RootCAs != nil { + t.Fatal("Should not have set Root CAs", err) + } + + if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { + t.Fatal("Unexpected client cipher suites") + } + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected client TLS version") + } + + if tlsConfig.Certificates != nil { + t.Fatal("Somehow client certificates were set") + } +} + +// The root CA is never set if InsecureSkipBoolean is set to false and root CA +// is not provided. +func TestConfigClientTLSNoRoot(t *testing.T) { + tlsConfig, err := Client(Options{}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.RootCAs != nil { + t.Fatal("Should not have set Root CAs", err) + } + + if !reflect.DeepEqual(tlsConfig.CipherSuites, clientCipherSuites) { + t.Fatal("Unexpected client cipher suites") + } + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected client TLS version") + } + + if tlsConfig.Certificates != nil { + t.Fatal("Somehow client certificates were set") + } +} + +// The RootCA is set if the file is provided and InsecureSkipVerify is false +func TestConfigClientTLSRootCAFileWithOneCert(t *testing.T) { + ca := getMultiCert() + + tlsConfig, err := Client(Options{CAFile: ca}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + basePool, err := SystemCertPool() + if err != nil { + basePool = x509.NewCertPool() + } + // because we are not enabling `ExclusiveRootPools`, any root pool will also contain the system roots + if tlsConfig.RootCAs == nil || len(tlsConfig.RootCAs.Subjects()) != len(basePool.Subjects())+2 { + t.Fatal("Root CAs not set properly", err) + } + if tlsConfig.Certificates != nil { + t.Fatal("Somehow client certificates were set") + } +} + +// An error is returned if a root CA is provided but the file doesn't exist. +func TestConfigClientTLSNonexistentRootCAFile(t *testing.T) { + tlsConfig, err := Client(Options{CAFile: "nonexistent"}) + + if err == nil || tlsConfig != nil { + t.Fatal("Should not have been able to configure client TLS", err) + } +} + +// An error is returned if either the client cert or the key are provided +// but invalid or blank. 
+func TestConfigClientTLSClientCertOrKeyInvalid(t *testing.T) { + key, cert := getCertAndKey() + + tempFile, err := ioutil.TempFile("", "cert-test") + if err != nil { + t.Fatal("Unable to create temporary empty file") + } + defer os.Remove(tempFile.Name()) + tempFile.Close() + + for i := 0; i < 2; i++ { + for _, invalid := range []string{"not-a-file", "", tempFile.Name()} { + files := []string{cert, key} + files[i] = invalid + + tlsConfig, err := Client(Options{CertFile: files[0], KeyFile: files[1]}) + if err == nil || tlsConfig != nil { + t.Fatal("Should not have been able to configure client TLS", err) + } + } + } +} + +// The certificate is set if the client cert and client key are provided and +// valid. +func TestConfigClientTLSValidClientCertAndKey(t *testing.T) { + key, cert := getCertAndKey() + + keypair, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + t.Fatal("Unable to load the generated cert and key") + } + + tlsConfig, err := Client(Options{CertFile: cert, KeyFile: key}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected client certificates") + } + if len(tlsConfig.Certificates[0].Certificate) != len(keypair.Certificate) { + t.Fatal("Unexpected client certificates") + } + for i, cert := range tlsConfig.Certificates[0].Certificate { + if !bytes.Equal(cert, keypair.Certificate[i]) { + t.Fatal("Unexpected client certificates") + } + } + + if tlsConfig.RootCAs != nil { + t.Fatal("Root CAs should not have been set", err) + } +} + +// The certificate is set if the client cert and encrypted client key are +// provided and valid and passphrase can decrypt the key +func TestConfigClientTLSValidClientCertAndEncryptedKey(t *testing.T) { + key, cert := getCertAndEncryptedKey() + + tlsConfig, err := Client(Options{ + CertFile: cert, + KeyFile: key, + Passphrase: "FooBar123", + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if len(tlsConfig.Certificates) != 1 { + t.Fatal("Unexpected client certificates") + } +} + +// The certificate is not set if the provided passphrase cannot decrypt +// the encrypted key. 
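
A sketch of the passphrase flow these tests exercise, assuming placeholder file names; IsErrEncryptedKey lets a caller distinguish a wrong passphrase from other setup failures.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	_, err := tlsconfig.Client(tlsconfig.Options{
		CertFile:   "cert.pem",          // placeholder
		KeyFile:    "encrypted_key.pem", // placeholder
		Passphrase: "wrong-passphrase",
	})
	// A bad passphrase can be retried; other errors usually cannot.
	if tlsconfig.IsErrEncryptedKey(err) {
		fmt.Println("passphrase did not decrypt the key; prompt again")
	} else if err != nil {
		fmt.Println("other TLS setup error:", err)
	}
}
```
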
+func TestConfigClientTLSNotSetWithInvalidPassphrase(t *testing.T) { + key, cert := getCertAndEncryptedKey() + + tlsConfig, err := Client(Options{ + CertFile: cert, + KeyFile: key, + Passphrase: "InvalidPassphrase", + }) + + if !IsErrEncryptedKey(err) || tlsConfig != nil { + t.Fatal("Expected failure due to incorrect passphrase.") + } +} + +// Exclusive root pools determines whether the CA pool will be a union of the system +// certificate pool and custom certs, or an exclusive or of the custom certs and system pool +func TestConfigClientExclusiveRootPools(t *testing.T) { + ca := getMultiCert() + + caBytes, err := ioutil.ReadFile(ca) + if err != nil { + t.Fatal("Unable to read CA certs", err) + } + + var testCerts []*x509.Certificate + for _, pemBytes := range [][]byte{caBytes, []byte(systemRootTrustedCert)} { + pemBlock, _ := pem.Decode(pemBytes) + if pemBlock == nil { + t.Fatal("Malformed certificate") + } + cert, err := x509.ParseCertificate(pemBlock.Bytes) + if err != nil { + t.Fatal("Unable to parse certificate") + } + testCerts = append(testCerts, cert) + } + + // ExclusiveRootPools not set, so should be able to verify both system-signed certs + // and custom CA-signed certs + tlsConfig, err := Client(Options{CAFile: ca}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + if _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}); err != nil { + t.Fatalf("Unable to verify certificate %d: %v", i, err) + } + } + + // ExclusiveRootPools set and custom CA provided, so system certs should not be verifiable + // and custom CA-signed certs should be verifiable + tlsConfig, err = Client(Options{ + CAFile: ca, + ExclusiveRootPools: true, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) + switch { + case i == 0 && err != nil: + t.Fatal("Unable to verify custom certificate, even though the root pool should have only the custom CA", err) + case i == 1 && err == nil: + t.Fatal("Successfully verified system root-signed certificate though the root pool should have only the cusotm CA", err) + } + } + + // No CA file provided, system cert should be verifiable only + tlsConfig, err = Client(Options{}) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + for i, cert := range testCerts { + _, err := cert.Verify(x509.VerifyOptions{Roots: tlsConfig.RootCAs}) + switch { + case i == 1 && err != nil: + t.Fatal("Unable to verify system root-signed certificate, even though the root pool should be the system pool only", err) + case i == 0 && err == nil: + t.Fatal("Successfully verified custom certificate though the root pool should be the system pool only", err) + } + } +} + +// If a valid MinVersion is specified in the options, the client's +// minimum version should be set accordingly +func TestConfigClientTLSMinVersionIsSetBasedOnOptions(t *testing.T) { + key, cert := getCertAndKey() + + tlsConfig, err := Client(Options{ + MinVersion: tls.VersionTLS12, + CertFile: cert, + KeyFile: key, + }) + + if err != nil || tlsConfig == nil { + t.Fatal("Unable to configure client TLS", err) + } + + if tlsConfig.MinVersion != tls.VersionTLS12 { + t.Fatal("Unexpected minimum TLS version: ", tlsConfig.MinVersion) + } +} + +// An error should be returned if the specified minimum version for the client +// is too low, i.e. 
less than VersionTLS12 +func TestConfigClientTLSMinVersionNotSetIfMinVersionIsTooLow(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Client(Options{ + MinVersion: tls.VersionTLS11, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned an error for minimum version below TLS12") + } +} + +// An error should be returned if an invalid minimum version for the client is +// in the options struct +func TestConfigClientTLSMinVersionNotSetIfMinVersionIsInvalid(t *testing.T) { + key, cert := getCertAndKey() + + _, err := Client(Options{ + MinVersion: 1, + CertFile: cert, + KeyFile: key, + }) + + if err == nil { + t.Fatal("Should have returned error on invalid minimum version option") + } +} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 0000000000..9ea86d784e --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. 
+``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 0000000000..b55b37bc31 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000000..477be8b214 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 0000000000..4f70a4e134 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000000..9043b35478 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 0000000000..ba02af26dc --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 46 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/duration_test.go b/vendor/github.com/docker/go-units/duration_test.go new file mode 100644 index 0000000000..e436c383f7 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration_test.go @@ -0,0 +1,95 @@ +package units + +import ( + "fmt" + "testing" + "time" +) + +func ExampleHumanDuration() { + fmt.Println(HumanDuration(450 * time.Millisecond)) + fmt.Println(HumanDuration(47 * time.Second)) + fmt.Println(HumanDuration(1 * time.Minute)) + fmt.Println(HumanDuration(3 * time.Minute)) + fmt.Println(HumanDuration(35 * time.Minute)) + fmt.Println(HumanDuration(35*time.Minute + 40*time.Second)) + fmt.Println(HumanDuration(1 * time.Hour)) + fmt.Println(HumanDuration(1*time.Hour + 45*time.Minute)) + fmt.Println(HumanDuration(3 * time.Hour)) + fmt.Println(HumanDuration(3*time.Hour + 59*time.Minute)) + fmt.Println(HumanDuration(3*time.Hour + 60*time.Minute)) + fmt.Println(HumanDuration(24 * time.Hour)) + fmt.Println(HumanDuration(24*time.Hour + 12*time.Hour)) + fmt.Println(HumanDuration(2 * 24 * time.Hour)) + fmt.Println(HumanDuration(7 * 24 * time.Hour)) + fmt.Println(HumanDuration(13*24*time.Hour + 5*time.Hour)) + fmt.Println(HumanDuration(2 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(2*7*24*time.Hour + 4*24*time.Hour)) + fmt.Println(HumanDuration(3 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(4 * 7 * 24 * time.Hour)) + fmt.Println(HumanDuration(4*7*24*time.Hour + 3*24*time.Hour)) + fmt.Println(HumanDuration(1 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(1*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(2 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(3*30*24*time.Hour + 1*7*24*time.Hour)) + fmt.Println(HumanDuration(5*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(13 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(23 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(24 * 30 * 24 * time.Hour)) + fmt.Println(HumanDuration(24*30*24*time.Hour + 2*7*24*time.Hour)) + fmt.Println(HumanDuration(3*365*24*time.Hour + 2*30*24*time.Hour)) +} + +func TestHumanDuration(t *testing.T) { + // Useful duration abstractions + day := 24 * time.Hour + week := 7 * day + month := 30 * day + year := 365 * day + + assertEquals(t, "Less than a second", HumanDuration(450*time.Millisecond)) + assertEquals(t, "1 second", HumanDuration(1*time.Second)) + assertEquals(t, "45 seconds", HumanDuration(45*time.Second)) + assertEquals(t, "46 seconds", HumanDuration(46*time.Second)) + assertEquals(t, "59 seconds", HumanDuration(59*time.Second)) + assertEquals(t, "About a minute", HumanDuration(60*time.Second)) + assertEquals(t, "About a minute", HumanDuration(1*time.Minute)) + assertEquals(t, "3 minutes", 
HumanDuration(3*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute)) + assertEquals(t, "35 minutes", HumanDuration(35*time.Minute+40*time.Second)) + assertEquals(t, "45 minutes", HumanDuration(45*time.Minute)) + assertEquals(t, "45 minutes", HumanDuration(45*time.Minute+40*time.Second)) + assertEquals(t, "About an hour", HumanDuration(46*time.Minute)) + assertEquals(t, "About an hour", HumanDuration(59*time.Minute)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour)) + assertEquals(t, "About an hour", HumanDuration(1*time.Hour+29*time.Minute)) + assertEquals(t, "2 hours", HumanDuration(1*time.Hour+31*time.Minute)) + assertEquals(t, "2 hours", HumanDuration(1*time.Hour+59*time.Minute)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour)) + assertEquals(t, "3 hours", HumanDuration(3*time.Hour+29*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+31*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+59*time.Minute)) + assertEquals(t, "4 hours", HumanDuration(3*time.Hour+60*time.Minute)) + assertEquals(t, "24 hours", HumanDuration(24*time.Hour)) + assertEquals(t, "36 hours", HumanDuration(1*day+12*time.Hour)) + assertEquals(t, "2 days", HumanDuration(2*day)) + assertEquals(t, "7 days", HumanDuration(7*day)) + assertEquals(t, "13 days", HumanDuration(13*day+5*time.Hour)) + assertEquals(t, "2 weeks", HumanDuration(2*week)) + assertEquals(t, "2 weeks", HumanDuration(2*week+4*day)) + assertEquals(t, "3 weeks", HumanDuration(3*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week)) + assertEquals(t, "4 weeks", HumanDuration(4*week+3*day)) + assertEquals(t, "4 weeks", HumanDuration(1*month)) + assertEquals(t, "6 weeks", HumanDuration(1*month+2*week)) + assertEquals(t, "2 months", HumanDuration(2*month)) + assertEquals(t, "2 months", HumanDuration(2*month+2*week)) + assertEquals(t, "3 months", HumanDuration(3*month)) + assertEquals(t, "3 months", HumanDuration(3*month+1*week)) + assertEquals(t, "5 months", HumanDuration(5*month+2*week)) + assertEquals(t, "13 months", HumanDuration(13*month)) + assertEquals(t, "23 months", HumanDuration(23*month)) + assertEquals(t, "24 months", HumanDuration(24*month)) + assertEquals(t, "2 years", HumanDuration(24*month+2*week)) + assertEquals(t, "3 years", HumanDuration(3*year+2*month)) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 0000000000..44616c2718 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + 
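
To make the decimal/binary split above concrete, here is a small sketch using the package's exported helpers (defined just below); the printed values follow directly from the 1000-based versus 1024-based unit maps.

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// Formatting: HumanSize uses the decimal (SI) map, BytesSize the binary one.
	fmt.Println(units.HumanSize(1048576)) // 1.049MB (base 1000)
	fmt.Println(units.BytesSize(1048576)) // 1MiB    (base 1024)

	// CustomSize exposes the same loop with a caller-chosen format string.
	fmt.Println(units.CustomSize("%.2f %s", 123456789, 1000.0,
		[]string{"B", "kB", "MB", "GB"})) // 123.46 MB

	// Parsing: same input, different unit maps. Both are case-insensitive
	// and the trailing "b"/"B" is optional.
	si, _ := units.FromHumanSize("32kb") // 32 * 1000
	bin, _ := units.RAMInBytes("32kb")   // 32 * 1024
	fmt.Println(si, bin)                 // 32000 32768

	// Fractions are accepted and the result is truncated to int64.
	n, _ := units.RAMInBytes("32.3 mb")
	fmt.Println(n) // 33869004 (32.3 * 1024 * 1024, truncated)
}
```
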
+// CustomSize returns a human-readable approximation of a size
+// using a custom format string.
+func CustomSize(format string, size float64, base float64, _map []string) string {
+	size, unit := getSizeAndUnit(size, base, _map)
+	return fmt.Sprintf(format, size, unit)
+}
+
+// HumanSizeWithPrecision returns a human-readable approximation of a size
+// rendered at the given precision, instead of the 4-digit precision used
+// by HumanSize.
+func HumanSizeWithPrecision(size float64, precision int) string {
+	size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs)
+	return fmt.Sprintf("%.*g%s", precision, size, unit)
+}
+
+// HumanSize returns a human-readable approximation of a size,
+// capped at 4 significant digits (e.g. "2.746 MB", "796 KB").
+func HumanSize(size float64) string {
+	return HumanSizeWithPrecision(size, 4)
+}
+
+// BytesSize returns a human-readable size in bytes, kibibytes,
+// mebibytes, gibibytes, or tebibytes (e.g. "44KiB", "17MiB").
+func BytesSize(size float64) string {
+	return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs)
+}
+
+// FromHumanSize returns an integer from a human-readable specification of a
+// size using the SI standard (e.g. "44kB", "17MB").
+func FromHumanSize(size string) (int64, error) {
+	return parseSize(size, decimalMap)
+}
+
+// RAMInBytes parses a human-readable string representing an amount of RAM
+// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
+// returns the number of bytes, or -1 if the string is unparseable.
+// Units are case-insensitive, and the 'b' suffix is optional.
+func RAMInBytes(size string) (int64, error) {
+	return parseSize(size, binaryMap)
+}
+
+// parseSize parses the human-readable size string into the amount it represents.
+func parseSize(sizeStr string, uMap unitMap) (int64, error) {
+	matches := sizeRegex.FindStringSubmatch(sizeStr)
+	if len(matches) != 4 {
+		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
+	}
+
+	size, err := strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return -1, err
+	}
+
+	unitPrefix := strings.ToLower(matches[3])
+	if mul, ok := uMap[unitPrefix]; ok {
+		size *= float64(mul)
+	}
+
+	return int64(size), nil
+}
diff --git a/vendor/github.com/docker/go-units/size_test.go b/vendor/github.com/docker/go-units/size_test.go
new file mode 100644
index 0000000000..8923e50761
--- /dev/null
+++ b/vendor/github.com/docker/go-units/size_test.go
@@ -0,0 +1,165 @@
+package units
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+	"strings"
+	"testing"
+)
+
+func ExampleBytesSize() {
+	fmt.Println(BytesSize(1024))
+	fmt.Println(BytesSize(1024 * 1024))
+	fmt.Println(BytesSize(1048576))
+	fmt.Println(BytesSize(2 * MiB))
+	fmt.Println(BytesSize(3.42 * GiB))
+	fmt.Println(BytesSize(5.372 * TiB))
+	fmt.Println(BytesSize(2.22 * PiB))
+}
+
+func ExampleHumanSize() {
+	fmt.Println(HumanSize(1000))
+	fmt.Println(HumanSize(1024))
+	fmt.Println(HumanSize(1000000))
+	fmt.Println(HumanSize(1048576))
+	fmt.Println(HumanSize(2 * MB))
+	fmt.Println(HumanSize(float64(3.42 * GB)))
+	fmt.Println(HumanSize(float64(5.372 * TB)))
+	fmt.Println(HumanSize(float64(2.22 * PB)))
+}
+
+func ExampleFromHumanSize() {
+	fmt.Println(FromHumanSize("32"))
+	fmt.Println(FromHumanSize("32b"))
+	fmt.Println(FromHumanSize("32B"))
+	fmt.Println(FromHumanSize("32k"))
+	fmt.Println(FromHumanSize("32K"))
+	fmt.Println(FromHumanSize("32kb"))
+	fmt.Println(FromHumanSize("32Kb"))
+	fmt.Println(FromHumanSize("32Mb"))
+	fmt.Println(FromHumanSize("32Gb"))
+	fmt.Println(FromHumanSize("32Tb"))
+	fmt.Println(FromHumanSize("32Pb"))
+}
+
+func ExampleRAMInBytes() {
+	fmt.Println(RAMInBytes("32"))
+
fmt.Println(RAMInBytes("32b")) + fmt.Println(RAMInBytes("32B")) + fmt.Println(RAMInBytes("32k")) + fmt.Println(RAMInBytes("32K")) + fmt.Println(RAMInBytes("32kb")) + fmt.Println(RAMInBytes("32Kb")) + fmt.Println(RAMInBytes("32Mb")) + fmt.Println(RAMInBytes("32Gb")) + fmt.Println(RAMInBytes("32Tb")) + fmt.Println(RAMInBytes("32Pb")) + fmt.Println(RAMInBytes("32PB")) + fmt.Println(RAMInBytes("32P")) +} + +func TestBytesSize(t *testing.T) { + assertEquals(t, "1KiB", BytesSize(1024)) + assertEquals(t, "1MiB", BytesSize(1024*1024)) + assertEquals(t, "1MiB", BytesSize(1048576)) + assertEquals(t, "2MiB", BytesSize(2*MiB)) + assertEquals(t, "3.42GiB", BytesSize(3.42*GiB)) + assertEquals(t, "5.372TiB", BytesSize(5.372*TiB)) + assertEquals(t, "2.22PiB", BytesSize(2.22*PiB)) + assertEquals(t, "1.049e+06YiB", BytesSize(KiB*KiB*KiB*KiB*KiB*PiB)) +} + +func TestHumanSize(t *testing.T) { + assertEquals(t, "1kB", HumanSize(1000)) + assertEquals(t, "1.024kB", HumanSize(1024)) + assertEquals(t, "1MB", HumanSize(1000000)) + assertEquals(t, "1.049MB", HumanSize(1048576)) + assertEquals(t, "2MB", HumanSize(2*MB)) + assertEquals(t, "3.42GB", HumanSize(float64(3.42*GB))) + assertEquals(t, "5.372TB", HumanSize(float64(5.372*TB))) + assertEquals(t, "2.22PB", HumanSize(float64(2.22*PB))) + assertEquals(t, "1e+04YB", HumanSize(float64(10000000000000*PB))) +} + +func TestFromHumanSize(t *testing.T) { + assertSuccessEquals(t, 32, FromHumanSize, "32") + assertSuccessEquals(t, 32, FromHumanSize, "32b") + assertSuccessEquals(t, 32, FromHumanSize, "32B") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32k") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32K") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32kb") + assertSuccessEquals(t, 32*KB, FromHumanSize, "32Kb") + assertSuccessEquals(t, 32*MB, FromHumanSize, "32Mb") + assertSuccessEquals(t, 32*GB, FromHumanSize, "32Gb") + assertSuccessEquals(t, 32*TB, FromHumanSize, "32Tb") + assertSuccessEquals(t, 32*PB, FromHumanSize, "32Pb") + + assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5kB") + assertSuccessEquals(t, 32.5*KB, FromHumanSize, "32.5 kB") + assertSuccessEquals(t, 32, FromHumanSize, "32.5 B") + + assertError(t, FromHumanSize, "") + assertError(t, FromHumanSize, "hello") + assertError(t, FromHumanSize, "-32") + assertError(t, FromHumanSize, ".3kB") + assertError(t, FromHumanSize, " 32 ") + assertError(t, FromHumanSize, "32m b") + assertError(t, FromHumanSize, "32bm") +} + +func TestRAMInBytes(t *testing.T) { + assertSuccessEquals(t, 32, RAMInBytes, "32") + assertSuccessEquals(t, 32, RAMInBytes, "32b") + assertSuccessEquals(t, 32, RAMInBytes, "32B") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32k") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32K") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32kb") + assertSuccessEquals(t, 32*KiB, RAMInBytes, "32Kb") + assertSuccessEquals(t, 32*MiB, RAMInBytes, "32Mb") + assertSuccessEquals(t, 32*GiB, RAMInBytes, "32Gb") + assertSuccessEquals(t, 32*TiB, RAMInBytes, "32Tb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32Pb") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32PB") + assertSuccessEquals(t, 32*PiB, RAMInBytes, "32P") + + assertSuccessEquals(t, 32, RAMInBytes, "32.3") + tmp := 32.3 * MiB + assertSuccessEquals(t, int64(tmp), RAMInBytes, "32.3 mb") + + assertError(t, RAMInBytes, "") + assertError(t, RAMInBytes, "hello") + assertError(t, RAMInBytes, "-32") + assertError(t, RAMInBytes, " 32 ") + assertError(t, RAMInBytes, "32m b") + assertError(t, RAMInBytes, "32bm") +} + +func assertEquals(t *testing.T, expected, 
actual interface{}) {
+	if expected != actual {
+		t.Errorf("Expected '%v' but got '%v'", expected, actual)
+	}
+}
+
+// parseFn matches the signature of the size-parsing functions under test;
+// it serves as a testing abstraction.
+type parseFn func(string) (int64, error)
+
+// String implements fmt.Stringer so that failure messages pretty-print the
+// name of the parse function being exercised.
+func (fn parseFn) String() string {
+	fnName := runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name()
+	return fnName[strings.LastIndex(fnName, ".")+1:]
+}
+
+func assertSuccessEquals(t *testing.T, expected int64, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err != nil || res != expected {
+		t.Errorf("%s(\"%s\") -> expected '%d' but got '%d' with error '%v'", fn, arg, expected, res, err)
+	}
+}
+
+func assertError(t *testing.T, fn parseFn, arg string) {
+	res, err := fn(arg)
+	if err == nil && res != -1 {
+		t.Errorf("%s(\"%s\") -> expected error but got '%d'", fn, arg, res)
+	}
+}
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go
new file mode 100644
index 0000000000..5ac7fd825f
--- /dev/null
+++ b/vendor/github.com/docker/go-units/ulimit.go
@@ -0,0 +1,118 @@
+package units
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Ulimit is a human-friendly version of Rlimit.
+type Ulimit struct {
+	Name string
+	Hard int64
+	Soft int64
+}
+
+// Rlimit specifies the resource limits, such as max open files.
+type Rlimit struct {
+	Type int    `json:"type,omitempty"`
+	Hard uint64 `json:"hard,omitempty"`
+	Soft uint64 `json:"soft,omitempty"`
+}
+
+const (
+	// Magic numbers for making the syscall. Some of these are defined in the
+	// syscall package, but not all, and since the Windows client does not get
+	// access to the syscall package they all need to be defined here.
+	rlimitAs         = 9
+	rlimitCore       = 4
+	rlimitCPU        = 0
+	rlimitData       = 2
+	rlimitFsize      = 1
+	rlimitLocks      = 10
+	rlimitMemlock    = 8
+	rlimitMsgqueue   = 12
+	rlimitNice       = 13
+	rlimitNofile     = 7
+	rlimitNproc      = 6
+	rlimitRss        = 5
+	rlimitRtprio     = 14
+	rlimitRttime     = 15
+	rlimitSigpending = 11
+	rlimitStack      = 3
+)
+
+var ulimitNameMapping = map[string]int{
+	//"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container.
+	"core":       rlimitCore,
+	"cpu":        rlimitCPU,
+	"data":       rlimitData,
+	"fsize":      rlimitFsize,
+	"locks":      rlimitLocks,
+	"memlock":    rlimitMemlock,
+	"msgqueue":   rlimitMsgqueue,
+	"nice":       rlimitNice,
+	"nofile":     rlimitNofile,
+	"nproc":      rlimitNproc,
+	"rss":        rlimitRss,
+	"rtprio":     rlimitRtprio,
+	"rttime":     rlimitRttime,
+	"sigpending": rlimitSigpending,
+	"stack":      rlimitStack,
+}
+
+// ParseUlimit parses and returns a Ulimit from the specified string.
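+// The expected format is "name=soft[:hard]" (e.g. "nofile=512:1024"); when the
+// hard value is omitted it defaults to the soft value, and the soft limit may
+// not exceed the hard limit.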
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/docker/go-units/ulimit_test.go b/vendor/github.com/docker/go-units/ulimit_test.go new file mode 100644 index 0000000000..902e02369a --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit_test.go @@ -0,0 +1,131 @@ +package units + +import ( + "fmt" + "strconv" + "testing" +) + +func ExampleParseUlimit() { + fmt.Println(ParseUlimit("nofile=512:1024")) + fmt.Println(ParseUlimit("nofile=1024")) + fmt.Println(ParseUlimit("cpu=2:4")) + fmt.Println(ParseUlimit("cpu=6")) +} + +func TestParseUlimitValid(t *testing.T) { + u1 := &Ulimit{"nofile", 1024, 512} + if u2, _ := ParseUlimit("nofile=512:1024"); *u1 != *u2 { + t.Fatalf("expected %q, but got %q", u1, u2) + } +} + +func TestParseUlimitInvalidLimitType(t *testing.T) { + if _, err := ParseUlimit("notarealtype=1024:1024"); err == nil { + t.Fatalf("expected error on invalid ulimit type") + } +} + +func TestParseUlimitBadFormat(t *testing.T) { + if _, err := ParseUlimit("nofile:1024:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := ParseUlimit("nofile"); err == nil { + t.Fatal("expected error on bad syntax") + } + + if _, err := ParseUlimit("nofile="); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := ParseUlimit("nofile=:"); err == nil { + t.Fatal("expected error on bad syntax") + } + if _, err := ParseUlimit("nofile=:1024"); err == nil { + t.Fatal("expected error on bad syntax") + } +} + +func TestParseUlimitHardLessThanSoft(t *testing.T) { + if _, err := ParseUlimit("nofile=1024:1"); err == nil { + t.Fatal("expected error on hard limit less than soft limit") + } +} + +func TestParseUlimitInvalidValueType(t *testing.T) { + if _, err := ParseUlimit("nofile=asdf"); err == nil { + t.Fatal("expected error on bad value type, but got no error") + } else if _, ok := err.(*strconv.NumError); !ok { + t.Fatalf("expected error on bad value type, but got `%s`", err) + } + + if _, err := ParseUlimit("nofile=1024:asdf"); err == nil { + t.Fatal("expected error on bad value type, but got no error") + } else if _, ok := err.(*strconv.NumError); 
!ok { + t.Fatalf("expected error on bad value type, but got `%s`", err) + } +} + +func TestParseUlimitTooManyValueArgs(t *testing.T) { + if _, err := ParseUlimit("nofile=1024:1:50"); err == nil { + t.Fatalf("expected error on more than two value arguments") + } +} + +func TestUlimitStringOutput(t *testing.T) { + u := &Ulimit{"nofile", 1024, 512} + if s := u.String(); s != "nofile=512:1024" { + t.Fatal("expected String to return nofile=512:1024, but got", s) + } +} + +func TestGetRlimit(t *testing.T) { + tt := []struct { + ulimit Ulimit + rlimit Rlimit + }{ + {Ulimit{"core", 10, 12}, Rlimit{rlimitCore, 10, 12}}, + {Ulimit{"cpu", 1, 10}, Rlimit{rlimitCPU, 1, 10}}, + {Ulimit{"data", 5, 0}, Rlimit{rlimitData, 5, 0}}, + {Ulimit{"fsize", 2, 2}, Rlimit{rlimitFsize, 2, 2}}, + {Ulimit{"locks", 0, 0}, Rlimit{rlimitLocks, 0, 0}}, + {Ulimit{"memlock", 10, 10}, Rlimit{rlimitMemlock, 10, 10}}, + {Ulimit{"msgqueue", 9, 1}, Rlimit{rlimitMsgqueue, 9, 1}}, + {Ulimit{"nice", 9, 9}, Rlimit{rlimitNice, 9, 9}}, + {Ulimit{"nofile", 4, 100}, Rlimit{rlimitNofile, 4, 100}}, + {Ulimit{"nproc", 5, 5}, Rlimit{rlimitNproc, 5, 5}}, + {Ulimit{"rss", 0, 5}, Rlimit{rlimitRss, 0, 5}}, + {Ulimit{"rtprio", 100, 65}, Rlimit{rlimitRtprio, 100, 65}}, + {Ulimit{"rttime", 55, 102}, Rlimit{rlimitRttime, 55, 102}}, + {Ulimit{"sigpending", 14, 20}, Rlimit{rlimitSigpending, 14, 20}}, + {Ulimit{"stack", 1, 1}, Rlimit{rlimitStack, 1, 1}}, + } + + for _, te := range tt { + res, err := te.ulimit.GetRlimit() + if err != nil { + t.Errorf("expected not to fail: %s", err) + } + if res.Type != te.rlimit.Type { + t.Errorf("expected Type to be %d but got %d", + te.rlimit.Type, res.Type) + } + if res.Soft != te.rlimit.Soft { + t.Errorf("expected Soft to be %d but got %d", + te.rlimit.Soft, res.Soft) + } + if res.Hard != te.rlimit.Hard { + t.Errorf("expected Hard to be %d but got %d", + te.rlimit.Hard, res.Hard) + } + + } +} + +func TestGetRlimitBadUlimitName(t *testing.T) { + name := "bla" + uLimit := Ulimit{name, 0, 0} + if _, err := uLimit.GetRlimit(); err == nil { + t.Error("expected error on bad Ulimit name") + } +} diff --git a/vendor/github.com/eapache/go-resiliency/.gitignore b/vendor/github.com/eapache/go-resiliency/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/eapache/go-resiliency/.travis.yml b/vendor/github.com/eapache/go-resiliency/.travis.yml new file mode 100644 index 0000000000..26c6bf8fe5 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 diff --git a/vendor/github.com/eapache/go-resiliency/README.md b/vendor/github.com/eapache/go-resiliency/README.md new file mode 100644 index 0000000000..0a0d70111d --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/README.md @@ -0,0 +1,21 @@ +go-resiliency +============= + +[![Build Status](https://travis-ci.org/eapache/go-resiliency.svg?branch=master)](https://travis-ci.org/eapache/go-resiliency) +[![GoDoc](https://godoc.org/github.com/eapache/go-resiliency?status.svg)](https://godoc.org/github.com/eapache/go-resiliency) +[![Code of 
Conduct](https://img.shields.io/badge/code%20of%20conduct-active-blue.svg)](https://eapache.github.io/conduct.html) + +Resiliency patterns for golang. +Based in part on [Hystrix](https://github.com/Netflix/Hystrix), +[Semian](https://github.com/Shopify/semian), and others. + +Currently implemented patterns include: +- circuit-breaker (in the `breaker` directory) +- semaphore (in the `semaphore` directory) +- deadline/timeout (in the `deadline` directory) +- batching (in the `batcher` directory) +- retriable (in the `retrier` directory) + +Follows semantic versioning using https://gopkg.in/ - import from +[`gopkg.in/eapache/go-resiliency.v1`](https://gopkg.in/eapache/go-resiliency.v1) +for guaranteed API stability. diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go new file mode 100644 index 0000000000..b41308db60 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker_test.go @@ -0,0 +1,196 @@ +package breaker + +import ( + "errors" + "testing" + "time" +) + +var errSomeError = errors.New("errSomeError") + +func alwaysPanics() error { + panic("foo") +} + +func returnsError() error { + return errSomeError +} + +func returnsSuccess() error { + return nil +} + +func TestBreakerErrorExpiry(t *testing.T) { + breaker := New(2, 1, 1*time.Second) + + for i := 0; i < 3; i++ { + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + time.Sleep(1 * time.Second) + } + + for i := 0; i < 3; i++ { + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + time.Sleep(1 * time.Second) + } +} + +func TestBreakerPanicsCountAsErrors(t *testing.T) { + breaker := New(3, 2, 1*time.Second) + + // three errors opens the breaker + for i := 0; i < 3; i++ { + func() { + defer func() { + val := recover() + if val.(string) != "foo" { + t.Error("incorrect panic") + } + }() + if err := breaker.Run(alwaysPanics); err != nil { + t.Error(err) + } + t.Error("shouldn't get here") + }() + } + + // breaker is open + for i := 0; i < 5; i++ { + if err := breaker.Run(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + } +} + +func TestBreakerStateTransitions(t *testing.T) { + breaker := New(3, 2, 1*time.Second) + + // three errors opens the breaker + for i := 0; i < 3; i++ { + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + } + + // breaker is open + for i := 0; i < 5; i++ { + if err := breaker.Run(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // one success works, but is not enough to fully close + if err := breaker.Run(returnsSuccess); err != nil { + t.Error(err) + } + // error works, but re-opens immediately + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + // breaker is open + if err := breaker.Run(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // two successes is enough to close it for good + for i := 0; i < 2; i++ { + if err := breaker.Run(returnsSuccess); err != nil { + t.Error(err) + } + } + // error works + if err := breaker.Run(returnsError); err != errSomeError { + t.Error(err) + } + // breaker is still closed + if err := breaker.Run(returnsSuccess); err != nil { + t.Error(err) + } +} + +func TestBreakerAsyncStateTransitions(t *testing.T) { + breaker := New(3, 2, 1*time.Second) + + // three errors opens the breaker + for i := 0; i < 3; i++ { + if 
err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + } + + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + + // breaker is open + for i := 0; i < 5; i++ { + if err := breaker.Go(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // one success works, but is not enough to fully close + if err := breaker.Go(returnsSuccess); err != nil { + t.Error(err) + } + // error works, but re-opens immediately + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + // breaker is open + if err := breaker.Go(returnsError); err != ErrBreakerOpen { + t.Error(err) + } + + // wait for it to half-close + time.Sleep(2 * time.Second) + // two successes is enough to close it for good + for i := 0; i < 2; i++ { + if err := breaker.Go(returnsSuccess); err != nil { + t.Error(err) + } + } + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + // error works + if err := breaker.Go(returnsError); err != nil { + t.Error(err) + } + // just enough to yield the scheduler and let the goroutines work off + time.Sleep(1 * time.Millisecond) + // breaker is still closed + if err := breaker.Go(returnsSuccess); err != nil { + t.Error(err) + } +} + +func ExampleBreaker() { + breaker := New(3, 1, 5*time.Second) + + for { + result := breaker.Run(func() error { + // communicate with some external service and + // return an error if the communication failed + return nil + }) + + switch result { + case nil: + // success! + case ErrBreakerOpen: + // our function wasn't run because the breaker was open + default: + // some other error + } + } +} diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml new file mode 100644 index 0000000000..d6cf4f1fa1 --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: +- 1.5.4 +- 1.6.1 + +sudo: false diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy_test.go b/vendor/github.com/eapache/go-xerial-snappy/snappy_test.go new file mode 100644 index 0000000000..e94f635dfa --- /dev/null +++ b/vendor/github.com/eapache/go-xerial-snappy/snappy_test.go @@ -0,0 +1,49 @@ +package snappy + +import ( + "bytes" + "testing" +) + +var snappyTestCases = map[string][]byte{ + "REPEATREPEATREPEATREPEATREPEATREPEAT": {36, 20, 82, 69, 80, 69, 65, 84, 118, 6, 0}, + "REALLY SHORT": {12, 44, 82, 69, 65, 76, 76, 89, 32, 83, 72, 79, 82, 84}, + "AXBXCXDXEXFX": {12, 44, 65, 88, 66, 88, 67, 88, 68, 88, 69, 88, 70, 88}, +} + +var snappyStreamTestCases = map[string][]byte{ + "PLAINDATA": {130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 11, 9, 32, 80, 76, 65, 73, 78, 68, 65, 84, 65}, + `{"a":"UtaitILHMDAAAAfU","b":"日本"}`: 
{130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 39, 37, 144, 123, 34, 97, 34, 58, 34, 85, 116, 97, 105, 116, 73, 76, 72, 77, 68, 65, 65, 65, 65, 102, 85, 34, 44, 34, 98, 34, 58, 34, 230, 151, 165, 230, 156, 172, 34, 125}, + `Sed ut perspiciatis unde omnis iste natus error sit voluptatem accusantium doloremque laudantium, totam rem aperiam, eaque ipsa quae ab illo inventore veritatis et quasi architecto beatae vitae dicta sunt explicabo. Nemo enim ipsam voluptatem quia voluptas sit aspernatur aut odit aut fugit, sed quia consequuntur magni dolores eos qui ratione voluptatem sequi nesciunt. Neque porro quisquam est, qui dolorem ipsum quia dolor sit amet, consectetur, adipisci velit, sed quia non numquam eius modi tempora incidunt ut labore et dolore magnam aliquam quaerat voluptatem. Ut enim ad minima veniam, quis nostrum exercitationem ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi consequatur? Quis autem vel eum iure reprehenderit qui in ea voluptate velit esse quam nihil molestiae consequatur, vel illum qui dolorem eum fugiat quo voluptas nulla pariatur? At vero eos et accusamus et iusto odio dignissimos ducimus qui blanditiis praesentium voluptatum deleniti atque corrupti quos dolores et quas molestias except`: {130, 83, 78, 65, 80, 80, 89, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 3, 89, 128, 8, 240, 90, 83, 101, 100, 32, 117, 116, 32, 112, 101, 114, 115, 112, 105, 99, 105, 97, 116, 105, 115, 32, 117, 110, 100, 101, 32, 111, 109, 110, 105, 115, 32, 105, 115, 116, 101, 32, 110, 97, 116, 117, 115, 32, 101, 114, 114, 111, 114, 32, 115, 105, 116, 32, 118, 111, 108, 117, 112, 116, 97, 116, 101, 109, 32, 97, 99, 99, 117, 115, 97, 110, 116, 105, 117, 109, 32, 100, 111, 108, 111, 114, 101, 109, 113, 117, 101, 32, 108, 97, 117, 100, 97, 5, 22, 240, 60, 44, 32, 116, 111, 116, 97, 109, 32, 114, 101, 109, 32, 97, 112, 101, 114, 105, 97, 109, 44, 32, 101, 97, 113, 117, 101, 32, 105, 112, 115, 97, 32, 113, 117, 97, 101, 32, 97, 98, 32, 105, 108, 108, 111, 32, 105, 110, 118, 101, 110, 116, 111, 114, 101, 32, 118, 101, 114, 105, 116, 97, 1, 141, 4, 101, 116, 1, 36, 88, 115, 105, 32, 97, 114, 99, 104, 105, 116, 101, 99, 116, 111, 32, 98, 101, 97, 116, 97, 101, 32, 118, 105, 1, 6, 120, 100, 105, 99, 116, 97, 32, 115, 117, 110, 116, 32, 101, 120, 112, 108, 105, 99, 97, 98, 111, 46, 32, 78, 101, 109, 111, 32, 101, 110, 105, 109, 5, 103, 0, 109, 46, 180, 0, 12, 113, 117, 105, 97, 17, 16, 0, 115, 5, 209, 72, 97, 115, 112, 101, 114, 110, 97, 116, 117, 114, 32, 97, 117, 116, 32, 111, 100, 105, 116, 5, 9, 36, 102, 117, 103, 105, 116, 44, 32, 115, 101, 100, 9, 53, 32, 99, 111, 110, 115, 101, 113, 117, 117, 110, 1, 42, 20, 109, 97, 103, 110, 105, 32, 9, 245, 16, 115, 32, 101, 111, 115, 1, 36, 28, 32, 114, 97, 116, 105, 111, 110, 101, 17, 96, 33, 36, 1, 51, 36, 105, 32, 110, 101, 115, 99, 105, 117, 110, 116, 1, 155, 1, 254, 16, 112, 111, 114, 114, 111, 1, 51, 36, 115, 113, 117, 97, 109, 32, 101, 115, 116, 44, 1, 14, 13, 81, 5, 183, 4, 117, 109, 1, 18, 0, 97, 9, 19, 4, 32, 115, 1, 149, 12, 109, 101, 116, 44, 9, 135, 76, 99, 116, 101, 116, 117, 114, 44, 32, 97, 100, 105, 112, 105, 115, 99, 105, 32, 118, 101, 108, 50, 173, 0, 24, 110, 111, 110, 32, 110, 117, 109, 9, 94, 84, 105, 117, 115, 32, 109, 111, 100, 105, 32, 116, 101, 109, 112, 111, 114, 97, 32, 105, 110, 99, 105, 100, 33, 52, 20, 117, 116, 32, 108, 97, 98, 33, 116, 4, 101, 116, 9, 106, 0, 101, 5, 219, 20, 97, 109, 32, 97, 108, 105, 5, 62, 33, 164, 8, 114, 97, 116, 29, 212, 12, 46, 32, 85, 116, 41, 94, 52, 97, 100, 32, 109, 105, 
110, 105, 109, 97, 32, 118, 101, 110, 105, 33, 221, 72, 113, 117, 105, 115, 32, 110, 111, 115, 116, 114, 117, 109, 32, 101, 120, 101, 114, 99, 105, 33, 202, 104, 111, 110, 101, 109, 32, 117, 108, 108, 97, 109, 32, 99, 111, 114, 112, 111, 114, 105, 115, 32, 115, 117, 115, 99, 105, 112, 105, 13, 130, 8, 105, 111, 115, 1, 64, 12, 110, 105, 115, 105, 1, 150, 5, 126, 44, 105, 100, 32, 101, 120, 32, 101, 97, 32, 99, 111, 109, 5, 192, 0, 99, 41, 131, 33, 172, 8, 63, 32, 81, 1, 107, 4, 97, 117, 33, 101, 96, 118, 101, 108, 32, 101, 117, 109, 32, 105, 117, 114, 101, 32, 114, 101, 112, 114, 101, 104, 101, 110, 100, 101, 114, 105, 65, 63, 12, 105, 32, 105, 110, 1, 69, 16, 118, 111, 108, 117, 112, 65, 185, 1, 47, 24, 105, 116, 32, 101, 115, 115, 101, 1, 222, 64, 109, 32, 110, 105, 104, 105, 108, 32, 109, 111, 108, 101, 115, 116, 105, 97, 101, 46, 103, 0, 0, 44, 1, 45, 16, 32, 105, 108, 108, 117, 37, 143, 45, 36, 0, 109, 5, 110, 65, 33, 20, 97, 116, 32, 113, 117, 111, 17, 92, 44, 115, 32, 110, 117, 108, 108, 97, 32, 112, 97, 114, 105, 9, 165, 24, 65, 116, 32, 118, 101, 114, 111, 69, 34, 44, 101, 116, 32, 97, 99, 99, 117, 115, 97, 109, 117, 115, 1, 13, 104, 105, 117, 115, 116, 111, 32, 111, 100, 105, 111, 32, 100, 105, 103, 110, 105, 115, 115, 105, 109, 111, 115, 32, 100, 117, 99, 105, 1, 34, 80, 113, 117, 105, 32, 98, 108, 97, 110, 100, 105, 116, 105, 105, 115, 32, 112, 114, 97, 101, 115, 101, 101, 87, 17, 111, 56, 116, 117, 109, 32, 100, 101, 108, 101, 110, 105, 116, 105, 32, 97, 116, 65, 89, 28, 99, 111, 114, 114, 117, 112, 116, 105, 1, 150, 0, 115, 13, 174, 5, 109, 8, 113, 117, 97, 65, 5, 52, 108, 101, 115, 116, 105, 97, 115, 32, 101, 120, 99, 101, 112, 116, 0, 0, 0, 1, 0}, +} + +func TestSnappyEncode(t *testing.T) { + for src, exp := range snappyTestCases { + dst := Encode([]byte(src)) + if !bytes.Equal(dst, exp) { + t.Errorf("Expected %s to generate %v, but was %v", src, exp, dst) + } + } +} + +func TestSnappyDecode(t *testing.T) { + for exp, src := range snappyTestCases { + dst, err := Decode(src) + if err != nil { + t.Error("Encoding error: ", err) + } else if !bytes.Equal(dst, []byte(exp)) { + t.Errorf("Expected %s to be generated from %v, but was %s", exp, src, string(dst)) + } + } +} + +func TestSnappyDecodeStreams(t *testing.T) { + for exp, src := range snappyStreamTestCases { + dst, err := Decode(src) + if err != nil { + t.Error("Encoding error: ", err) + } else if !bytes.Equal(dst, []byte(exp)) { + t.Errorf("Expected %s to be generated from [%d]byte, but was %s", exp, len(src), string(dst)) + } + } +} diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore new file mode 100644 index 0000000000..836562412f --- /dev/null +++ b/vendor/github.com/eapache/queue/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml new file mode 100644 index 0000000000..235a40a493 --- /dev/null +++ b/vendor/github.com/eapache/queue/.travis.yml @@ -0,0 +1,7 @@ +language: go +sudo: false + +go: + - 1.2 + - 1.3 + - 1.4 diff --git a/vendor/github.com/eapache/queue/queue_test.go b/vendor/github.com/eapache/queue/queue_test.go new file mode 100644 index 0000000000..f2765c14d6 --- /dev/null +++ 
b/vendor/github.com/eapache/queue/queue_test.go
@@ -0,0 +1,162 @@
+package queue
+
+import "testing"
+
+func TestQueueSimple(t *testing.T) {
+	q := New()
+
+	for i := 0; i < minQueueLen; i++ {
+		q.Add(i)
+	}
+	for i := 0; i < minQueueLen; i++ {
+		if q.Peek().(int) != i {
+			t.Error("peek", i, "had value", q.Peek())
+		}
+		q.Remove()
+	}
+}
+
+func TestQueueWrapping(t *testing.T) {
+	q := New()
+
+	for i := 0; i < minQueueLen; i++ {
+		q.Add(i)
+	}
+	for i := 0; i < 3; i++ {
+		q.Remove()
+		q.Add(minQueueLen + i)
+	}
+
+	for i := 0; i < minQueueLen; i++ {
+		if q.Peek().(int) != i+3 {
+			t.Error("peek", i, "had value", q.Peek())
+		}
+		q.Remove()
+	}
+}
+
+func TestQueueLength(t *testing.T) {
+	q := New()
+
+	if q.Length() != 0 {
+		t.Error("empty queue length not 0")
+	}
+
+	for i := 0; i < 1000; i++ {
+		q.Add(i)
+		if q.Length() != i+1 {
+			t.Error("adding: queue with", i, "elements has length", q.Length())
+		}
+	}
+	for i := 0; i < 1000; i++ {
+		q.Remove()
+		if q.Length() != 1000-i-1 {
+			t.Error("removing: queue with", 1000-i-1, "elements has length", q.Length())
+		}
+	}
+}
+
+func TestQueueGet(t *testing.T) {
+	q := New()
+
+	for i := 0; i < 1000; i++ {
+		q.Add(i)
+		for j := 0; j < q.Length(); j++ {
+			if q.Get(j).(int) != j {
+				t.Errorf("index %d doesn't contain %d", j, j)
+			}
+		}
+	}
+}
+
+func TestQueueGetOutOfRangePanics(t *testing.T) {
+	q := New()
+
+	q.Add(1)
+	q.Add(2)
+	q.Add(3)
+
+	assertPanics(t, "should panic when negative index", func() {
+		q.Get(-1)
+	})
+
+	assertPanics(t, "should panic when index greater than length", func() {
+		q.Get(4)
+	})
+}
+
+func TestQueuePeekOutOfRangePanics(t *testing.T) {
+	q := New()
+
+	assertPanics(t, "should panic when peeking empty queue", func() {
+		q.Peek()
+	})
+
+	q.Add(1)
+	q.Remove()
+
+	assertPanics(t, "should panic when peeking emptied queue", func() {
+		q.Peek()
+	})
+}
+
+func TestQueueRemoveOutOfRangePanics(t *testing.T) {
+	q := New()
+
+	assertPanics(t, "should panic when removing empty queue", func() {
+		q.Remove()
+	})
+
+	q.Add(1)
+	q.Remove()
+
+	assertPanics(t, "should panic when removing emptied queue", func() {
+		q.Remove()
+	})
+}
+
+func assertPanics(t *testing.T, name string, f func()) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Errorf("%s: didn't panic as expected", name)
+		}
+	}()
+
+	f()
+}
+
+// General warning: Go's benchmark utility (go test -bench .) increases the number of
+// iterations until the benchmarks take a reasonable amount of time to run; memory usage
+// is *NOT* considered. On my machine, these benchmarks hit around ~1GB before they've had
+// enough, but if you have less than that available and start swapping, then all bets are off.
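+// They can be run with, e.g., "go test -bench . -benchmem"; the -benchmem
+// flag additionally reports allocation statistics per iteration.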
+ +func BenchmarkQueueSerial(b *testing.B) { + q := New() + for i := 0; i < b.N; i++ { + q.Add(nil) + } + for i := 0; i < b.N; i++ { + q.Peek() + q.Remove() + } +} + +func BenchmarkQueueGet(b *testing.B) { + q := New() + for i := 0; i < b.N; i++ { + q.Add(i) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + q.Get(i) + } +} + +func BenchmarkQueueTickTock(b *testing.B) { + q := New() + for i := 0; i < b.N; i++ { + q.Add(nil) + q.Peek() + q.Remove() + } +} diff --git a/vendor/github.com/go-macaron/binding/.gitignore b/vendor/github.com/go-macaron/binding/.gitignore new file mode 100644 index 0000000000..485dee64bc --- /dev/null +++ b/vendor/github.com/go-macaron/binding/.gitignore @@ -0,0 +1 @@ +.idea diff --git a/vendor/github.com/go-macaron/binding/.travis.yml b/vendor/github.com/go-macaron/binding/.travis.yml new file mode 100644 index 0000000000..2462c6e19d --- /dev/null +++ b/vendor/github.com/go-macaron/binding/.travis.yml @@ -0,0 +1,15 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/vendor/github.com/go-macaron/binding/bind_test.go b/vendor/github.com/go-macaron/binding/bind_test.go new file mode 100644 index 0000000000..318ea7d7f9 --- /dev/null +++ b/vendor/github.com/go-macaron/binding/bind_test.go @@ -0,0 +1,57 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Bind(t *testing.T) { + Convey("Bind test", t, func() { + Convey("Bind form", func() { + for _, testCase := range formTestCases { + performFormTest(t, Bind, testCase) + } + }) + + Convey("Bind JSON", func() { + for _, testCase := range jsonTestCases { + performJsonTest(t, Bind, testCase) + } + }) + + Convey("Bind multipart form", func() { + for _, testCase := range multipartFormTestCases { + performMultipartFormTest(t, Bind, testCase) + } + }) + + Convey("Bind with file", func() { + for _, testCase := range fileTestCases { + performFileTest(t, Bind, testCase) + performFileTest(t, BindIgnErr, testCase) + } + }) + }) +} + +func Test_Version(t *testing.T) { + Convey("Get package version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} diff --git a/vendor/github.com/go-macaron/binding/common_test.go b/vendor/github.com/go-macaron/binding/common_test.go new file mode 100755 index 0000000000..2261e8006e --- /dev/null +++ b/vendor/github.com/go-macaron/binding/common_test.go @@ -0,0 +1,127 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "mime/multipart" + + "gopkg.in/macaron.v1" +) + +// These types are mostly contrived examples, but they're used +// across many test cases. The idea is to cover all the scenarios +// that this binding package might encounter in actual use. +type ( + // For basic test cases with a required field + Post struct { + Title string `form:"title" json:"title" binding:"Required"` + Content string `form:"content" json:"content"` + } + + // To be used as a nested struct (with a required field) + Person struct { + Name string `form:"name" json:"name" binding:"Required"` + Email string `form:"email" json:"email"` + } + + // For advanced test cases: multiple values, embedded + // and nested structs, an ignored field, and single + // and multiple file uploads + BlogPost struct { + Post + Id int `binding:"Required"` // JSON not specified here for test coverage + Ignored string `form:"-" json:"-"` + Ratings []int `form:"rating" json:"ratings"` + Author Person `json:"author"` + Coauthor *Person `json:"coauthor"` + HeaderImage *multipart.FileHeader + Pictures []*multipart.FileHeader `form:"picture"` + unexported string `form:"unexported"` + } + + EmbedPerson struct { + *Person + } + + SadForm struct { + AlphaDash string `form:"AlphaDash" binding:"AlphaDash"` + AlphaDashDot string `form:"AlphaDashDot" binding:"AlphaDashDot"` + Size string `form:"Size" binding:"Size(1)"` + SizeSlice []string `form:"SizeSlice" binding:"Size(1)"` + MinSize string `form:"MinSize" binding:"MinSize(5)"` + MinSizeSlice []string `form:"MinSizeSlice" binding:"MinSize(5)"` + MaxSize string `form:"MaxSize" binding:"MaxSize(1)"` + MaxSizeSlice []string `form:"MaxSizeSlice" binding:"MaxSize(1)"` + Range int `form:"Range" binding:"Range(1,2)"` + RangeInvalid int `form:"RangeInvalid" binding:"Range(1)"` + Email string `binding:"Email"` + Url string `form:"Url" binding:"Url"` + UrlEmpty string `form:"UrlEmpty" binding:"Url"` + In string `form:"In" binding:"Default(0);In(1,2,3)"` + InInvalid string `form:"InInvalid" binding:"In(1,2,3)"` + NotIn string `form:"NotIn" binding:"NotIn(1,2,3)"` + Include string `form:"Include" binding:"Include(a)"` + Exclude string `form:"Exclude" binding:"Exclude(a)"` + Empty string `binding:"OmitEmpty"` + } + + Group struct { + Name string `json:"name" binding:"Required"` + People []Person `json:"people" binding:"MinSize(1)"` + } + + CustomErrorHandle struct { + Rule `binding:"CustomRule"` + } + + // The common function signature of the handlers going under test. + handlerFunc func(interface{}, ...interface{}) macaron.Handler + + // Used for testing mapping an interface to the context + // If used (withInterface = true in the testCases), a modeler + // should be mapped to the context as well as BlogPost, meaning + // you can receive a modeler in your application instead of a + // concrete BlogPost. 
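+	// For example (see performFormTest in form_test.go), the bound handler can
+	// be declared as func(actual Post, iface modeler, errs Errors), and
+	// iface.Model() then returns the same value as the bound struct's Model().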
+ modeler interface { + Model() string + } +) + +func (p Post) Validate(ctx *macaron.Context, errs Errors) Errors { + if len(p.Title) < 10 { + errs = append(errs, Error{ + FieldNames: []string{"title"}, + Classification: "LengthError", + Message: "Life is too short", + }) + } + return errs +} + +func (p Post) Model() string { + return p.Title +} + +func (g Group) Model() string { + return g.Name +} + +func (_ CustomErrorHandle) Error(_ *macaron.Context, _ Errors) {} + +const ( + testRoute = "/test" + formContentType = "application/x-www-form-urlencoded" +) diff --git a/vendor/github.com/go-macaron/binding/errorhandler_test.go b/vendor/github.com/go-macaron/binding/errorhandler_test.go new file mode 100755 index 0000000000..b74a812eac --- /dev/null +++ b/vendor/github.com/go-macaron/binding/errorhandler_test.go @@ -0,0 +1,162 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "io/ioutil" + "net/http" + "net/http/httptest" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +var errorTestCases = []errorTestCase{ + { + description: "No errors", + errors: Errors{}, + expected: errorTestResult{ + statusCode: http.StatusOK, + }, + }, + { + description: "Deserialization error", + errors: Errors{ + { + Classification: ERR_DESERIALIZATION, + Message: "Some parser error here", + }, + }, + expected: errorTestResult{ + statusCode: http.StatusBadRequest, + contentType: _JSON_CONTENT_TYPE, + body: `[{"classification":"DeserializationError","message":"Some parser error here"}]`, + }, + }, + { + description: "Content-Type error", + errors: Errors{ + { + Classification: ERR_CONTENT_TYPE, + Message: "Empty Content-Type", + }, + }, + expected: errorTestResult{ + statusCode: http.StatusUnsupportedMediaType, + contentType: _JSON_CONTENT_TYPE, + body: `[{"classification":"ContentTypeError","message":"Empty Content-Type"}]`, + }, + }, + { + description: "Requirement error", + errors: Errors{ + { + FieldNames: []string{"some_field"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + }, + expected: errorTestResult{ + statusCode: STATUS_UNPROCESSABLE_ENTITY, + contentType: _JSON_CONTENT_TYPE, + body: `[{"fieldNames":["some_field"],"classification":"RequiredError","message":"Required"}]`, + }, + }, + { + description: "Bad header error", + errors: Errors{ + { + Classification: "HeaderError", + Message: "The X-Something header must be specified", + }, + }, + expected: errorTestResult{ + statusCode: STATUS_UNPROCESSABLE_ENTITY, + contentType: _JSON_CONTENT_TYPE, + body: `[{"classification":"HeaderError","message":"The X-Something header must be specified"}]`, + }, + }, + { + description: "Custom field error", + errors: Errors{ + { + FieldNames: []string{"month", "year"}, + Classification: "DateError", + Message: "The month and year must be in the future", + }, + }, + expected: errorTestResult{ + statusCode: STATUS_UNPROCESSABLE_ENTITY, + contentType: _JSON_CONTENT_TYPE, + body: 
`[{"fieldNames":["month","year"],"classification":"DateError","message":"The month and year must be in the future"}]`, + }, + }, + { + description: "Multiple errors", + errors: Errors{ + { + FieldNames: []string{"foo"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + { + FieldNames: []string{"foo"}, + Classification: "LengthError", + Message: "The length of the 'foo' field is too short", + }, + }, + expected: errorTestResult{ + statusCode: STATUS_UNPROCESSABLE_ENTITY, + contentType: _JSON_CONTENT_TYPE, + body: `[{"fieldNames":["foo"],"classification":"RequiredError","message":"Required"},{"fieldNames":["foo"],"classification":"LengthError","message":"The length of the 'foo' field is too short"}]`, + }, + }, +} + +func Test_ErrorHandler(t *testing.T) { + Convey("Error handler", t, func() { + for _, testCase := range errorTestCases { + performErrorTest(t, testCase) + } + }) +} + +func performErrorTest(t *testing.T, testCase errorTestCase) { + resp := httptest.NewRecorder() + + errorHandler(testCase.errors, resp) + + So(resp.Code, ShouldEqual, testCase.expected.statusCode) + So(resp.Header().Get("Content-Type"), ShouldEqual, testCase.expected.contentType) + + actualBody, err := ioutil.ReadAll(resp.Body) + So(err, ShouldBeNil) + So(string(actualBody), ShouldEqual, testCase.expected.body) +} + +type ( + errorTestCase struct { + description string + errors Errors + expected errorTestResult + } + + errorTestResult struct { + statusCode int + contentType string + body string + } +) diff --git a/vendor/github.com/go-macaron/binding/errors_test.go b/vendor/github.com/go-macaron/binding/errors_test.go new file mode 100755 index 0000000000..0e9659c374 --- /dev/null +++ b/vendor/github.com/go-macaron/binding/errors_test.go @@ -0,0 +1,115 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "fmt" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_ErrorsAdd(t *testing.T) { + Convey("Add new error", t, func() { + var actual Errors + expected := Errors{ + Error{ + FieldNames: []string{"Field1", "Field2"}, + Classification: "ErrorClass", + Message: "Some message", + }, + } + + actual.Add(expected[0].FieldNames, expected[0].Classification, expected[0].Message) + + So(len(actual), ShouldEqual, 1) + So(fmt.Sprintf("%#v", actual), ShouldEqual, fmt.Sprintf("%#v", expected)) + }) +} + +func Test_ErrorsLen(t *testing.T) { + Convey("Get number of errors", t, func() { + So(errorsTestSet.Len(), ShouldEqual, len(errorsTestSet)) + }) +} + +func Test_ErrorsHas(t *testing.T) { + Convey("Check error class", t, func() { + So(errorsTestSet.Has("ClassA"), ShouldBeTrue) + So(errorsTestSet.Has("ClassQ"), ShouldBeFalse) + }) +} + +func Test_ErrorGetters(t *testing.T) { + Convey("Get error detail", t, func() { + err := Error{ + FieldNames: []string{"field1", "field2"}, + Classification: "ErrorClass", + Message: "The message", + } + + fieldsActual := err.Fields() + + So(len(fieldsActual), ShouldEqual, 2) + So(fieldsActual[0], ShouldEqual, "field1") + So(fieldsActual[1], ShouldEqual, "field2") + + So(err.Kind(), ShouldEqual, "ErrorClass") + So(err.Error(), ShouldEqual, "The message") + }) +} + +/* +func TestErrorsWithClass(t *testing.T) { + expected := Errors{ + errorsTestSet[0], + errorsTestSet[3], + } + actualStr := fmt.Sprintf("%#v", errorsTestSet.WithClass("ClassA")) + expectedStr := fmt.Sprintf("%#v", expected) + if actualStr != expectedStr { + t.Errorf("Expected:\n%s\nbut got:\n%s", expectedStr, actualStr) + } +} +*/ + +var errorsTestSet = Errors{ + Error{ + FieldNames: []string{}, + Classification: "ClassA", + Message: "Foobar", + }, + Error{ + FieldNames: []string{}, + Classification: "ClassB", + Message: "Foo", + }, + Error{ + FieldNames: []string{"field1", "field2"}, + Classification: "ClassB", + Message: "Foobar", + }, + Error{ + FieldNames: []string{"field2"}, + Classification: "ClassA", + Message: "Foobar", + }, + Error{ + FieldNames: []string{"field2"}, + Classification: "ClassB", + Message: "Foobar", + }, +} diff --git a/vendor/github.com/go-macaron/binding/file_test.go b/vendor/github.com/go-macaron/binding/file_test.go new file mode 100755 index 0000000000..dae854ea1e --- /dev/null +++ b/vendor/github.com/go-macaron/binding/file_test.go @@ -0,0 +1,191 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "bytes" + "mime/multipart" + "net/http" + "net/http/httptest" + "testing" + + . "github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +var fileTestCases = []fileTestCase{ + { + description: "Single file", + singleFile: &fileInfo{ + fileName: "message.txt", + data: "All your binding are belong to us", + }, + }, + { + description: "Multiple files", + multipleFiles: []*fileInfo{ + &fileInfo{ + fileName: "cool-gopher-fact.txt", + data: "Did you know? 
https://plus.google.com/+MatthewHolt/posts/GmVfd6TPJ51", + }, + &fileInfo{ + fileName: "gophercon2014.txt", + data: "@bradfitz has a Go time machine: https://twitter.com/mholt6/status/459463953395875840", + }, + }, + }, + { + description: "Single file and multiple files", + singleFile: &fileInfo{ + fileName: "social media.txt", + data: "Hey, you should follow @mholt6 (Twitter) or +MatthewHolt (Google+)", + }, + multipleFiles: []*fileInfo{ + &fileInfo{ + fileName: "thank you!", + data: "Also, thanks to all the contributors of this package!", + }, + &fileInfo{ + fileName: "btw...", + data: "This tool translates JSON into Go structs: http://mholt.github.io/json-to-go/", + }, + }, + }, +} + +func Test_FileUploads(t *testing.T) { + Convey("Test file upload", t, func() { + for _, testCase := range fileTestCases { + performFileTest(t, MultipartForm, testCase) + } + }) +} + +func performFileTest(t *testing.T, binder handlerFunc, testCase fileTestCase) { + httpRecorder := httptest.NewRecorder() + m := macaron.Classic() + + fileTestHandler := func(actual BlogPost, errs Errors) { + assertFileAsExpected(t, testCase, actual.HeaderImage, testCase.singleFile) + So(len(testCase.multipleFiles), ShouldEqual, len(actual.Pictures)) + + for i, expectedFile := range testCase.multipleFiles { + if i >= len(actual.Pictures) { + break + } + assertFileAsExpected(t, testCase, actual.Pictures[i], expectedFile) + } + } + + m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { + fileTestHandler(actual, errs) + }) + + m.ServeHTTP(httpRecorder, buildRequestWithFile(testCase)) + + switch httpRecorder.Code { + case http.StatusNotFound: + panic("Routing is messed up in test fixture (got 404): check methods and paths") + case http.StatusInternalServerError: + panic("Something bad happened on '" + testCase.description + "'") + } +} + +func assertFileAsExpected(t *testing.T, testCase fileTestCase, actual *multipart.FileHeader, expected *fileInfo) { + if expected == nil && actual == nil { + return + } + + if expected != nil && actual == nil { + So(actual, ShouldNotBeNil) + return + } else if expected == nil && actual != nil { + So(actual, ShouldBeNil) + return + } + + So(actual.Filename, ShouldEqual, expected.fileName) + So(unpackFileHeaderData(actual), ShouldEqual, expected.data) +} + +func buildRequestWithFile(testCase fileTestCase) *http.Request { + b := &bytes.Buffer{} + w := multipart.NewWriter(b) + + if testCase.singleFile != nil { + formFileSingle, err := w.CreateFormFile("header_image", testCase.singleFile.fileName) + if err != nil { + panic("Could not create FormFile (single file): " + err.Error()) + } + formFileSingle.Write([]byte(testCase.singleFile.data)) + } + + for _, file := range testCase.multipleFiles { + formFileMultiple, err := w.CreateFormFile("picture", file.fileName) + if err != nil { + panic("Could not create FormFile (multiple files): " + err.Error()) + } + formFileMultiple.Write([]byte(file.data)) + } + + err := w.Close() + if err != nil { + panic("Could not close multipart writer: " + err.Error()) + } + + req, err := http.NewRequest("POST", testRoute, b) + if err != nil { + panic("Could not create file upload request: " + err.Error()) + } + + req.Header.Set("Content-Type", w.FormDataContentType()) + + return req +} + +func unpackFileHeaderData(fh *multipart.FileHeader) string { + if fh == nil { + return "" + } + + f, err := fh.Open() + if err != nil { + panic("Could not open file header:" + err.Error()) + } + defer f.Close() + + var fb bytes.Buffer + _, err = fb.ReadFrom(f) + if err 
!= nil { + panic("Could not read from file header:" + err.Error()) + } + + return fb.String() +} + +type ( + fileTestCase struct { + description string + input BlogPost + singleFile *fileInfo + multipleFiles []*fileInfo + } + + fileInfo struct { + fileName string + data string + } +) diff --git a/vendor/github.com/go-macaron/binding/form_test.go b/vendor/github.com/go-macaron/binding/form_test.go new file mode 100755 index 0000000000..601ecaf868 --- /dev/null +++ b/vendor/github.com/go-macaron/binding/form_test.go @@ -0,0 +1,282 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "fmt" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + + . "github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +var formTestCases = []formTestCase{ + { + description: "Happy path", + shouldSucceed: true, + payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, + contentType: formContentType, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Happy path with interface", + shouldSucceed: true, + withInterface: true, + payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, + contentType: formContentType, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Empty payload", + shouldSucceed: false, + payload: ``, + contentType: formContentType, + expected: Post{}, + }, + { + description: "Empty content type", + shouldSucceed: false, + payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, + contentType: ``, + expected: Post{}, + }, + { + description: "Malformed form body", + shouldSucceed: false, + payload: `title=%2`, + contentType: formContentType, + expected: Post{}, + }, + { + description: "With nested and embedded structs", + shouldSucceed: true, + payload: `title=Glorious+Post+Title&id=1&name=Matt+Holt`, + contentType: formContentType, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Required embedded struct field not specified", + shouldSucceed: false, + payload: `id=1&name=Matt+Holt`, + contentType: formContentType, + expected: BlogPost{Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Required nested struct field not specified", + shouldSucceed: false, + payload: `title=Glorious+Post+Title&id=1`, + contentType: formContentType, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1}, + }, + { + description: "Multiple values into slice", + shouldSucceed: true, + payload: `title=Glorious+Post+Title&id=1&name=Matt+Holt&rating=4&rating=3&rating=5`, + contentType: formContentType, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}, Ratings: []int{4, 3, 5}}, + }, + { + description: "Unexported field", + shouldSucceed: true, + 
payload: `title=Glorious+Post+Title&id=1&name=Matt+Holt&unexported=foo`, + contentType: formContentType, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Query string POST", + shouldSucceed: true, + payload: `title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet`, + contentType: formContentType, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Query string with Content-Type (POST request)", + shouldSucceed: true, + queryString: "?title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet", + payload: ``, + contentType: formContentType, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Query string without Content-Type (GET request)", + shouldSucceed: true, + method: "GET", + queryString: "?title=Glorious+Post+Title&content=Lorem+ipsum+dolor+sit+amet", + payload: ``, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Embed struct pointer", + shouldSucceed: true, + deepEqual: true, + method: "GET", + queryString: "?name=Glorious+Post+Title&email=Lorem+ipsum+dolor+sit+amet", + payload: ``, + expected: EmbedPerson{&Person{Name: "Glorious Post Title", Email: "Lorem ipsum dolor sit amet"}}, + }, + { + description: "Embed struct pointer remains nil if not bound", + shouldSucceed: true, + deepEqual: true, + method: "GET", + queryString: "?", + payload: ``, + expected: EmbedPerson{nil}, + }, + { + description: "Custom error handler", + shouldSucceed: true, + deepEqual: true, + method: "GET", + queryString: "?", + payload: ``, + expected: CustomErrorHandle{}, + }, +} + +func init() { + AddRule(&Rule{ + func(rule string) bool { + return rule == "CustomRule" + }, + func(errs Errors, _ string, _ interface{}) (bool, Errors) { + return false, errs + }, + }) + SetNameMapper(nameMapper) +} + +func Test_Form(t *testing.T) { + Convey("Test form", t, func() { + for _, testCase := range formTestCases { + performFormTest(t, Form, testCase) + } + }) +} + +func performFormTest(t *testing.T, binder handlerFunc, testCase formTestCase) { + resp := httptest.NewRecorder() + m := macaron.Classic() + + formTestHandler := func(actual interface{}, errs Errors) { + if testCase.shouldSucceed && len(errs) > 0 { + So(len(errs), ShouldEqual, 0) + } else if !testCase.shouldSucceed && len(errs) == 0 { + So(len(errs), ShouldNotEqual, 0) + } + expString := fmt.Sprintf("%+v", testCase.expected) + actString := fmt.Sprintf("%+v", actual) + if actString != expString && !(testCase.deepEqual && reflect.DeepEqual(testCase.expected, actual)) { + So(actString, ShouldEqual, expString) + } + } + + switch testCase.expected.(type) { + case Post: + if testCase.withInterface { + m.Post(testRoute, binder(Post{}, (*modeler)(nil)), func(actual Post, iface modeler, errs Errors) { + So(actual.Title, ShouldEqual, iface.Model()) + formTestHandler(actual, errs) + }) + } else { + m.Post(testRoute, binder(Post{}), func(actual Post, errs Errors) { + formTestHandler(actual, errs) + }) + m.Get(testRoute, binder(Post{}), func(actual Post, errs Errors) { + formTestHandler(actual, errs) + }) + } + + case BlogPost: + if testCase.withInterface { + m.Post(testRoute, binder(BlogPost{}, (*modeler)(nil)), func(actual BlogPost, iface modeler, errs Errors) { + So(actual.Title, ShouldEqual, iface.Model()) + formTestHandler(actual, errs) + }) + } else { + m.Post(testRoute, binder(BlogPost{}),
func(actual BlogPost, errs Errors) { + formTestHandler(actual, errs) + }) + } + + case EmbedPerson: + m.Post(testRoute, binder(EmbedPerson{}), func(actual EmbedPerson, errs Errors) { + formTestHandler(actual, errs) + }) + m.Get(testRoute, binder(EmbedPerson{}), func(actual EmbedPerson, errs Errors) { + formTestHandler(actual, errs) + }) + case CustomErrorHandle: + m.Get(testRoute, binder(CustomErrorHandle{}), func(actual CustomErrorHandle, errs Errors) { + formTestHandler(actual, errs) + }) + } + + if len(testCase.method) == 0 { + testCase.method = "POST" + } + + req, err := http.NewRequest(testCase.method, testRoute+testCase.queryString, strings.NewReader(testCase.payload)) + if err != nil { + panic(err) + } + req.Header.Set("Content-Type", testCase.contentType) + + m.ServeHTTP(resp, req) + + switch resp.Code { + case http.StatusNotFound: + panic("Routing is messed up in test fixture (got 404): check methods and paths") + case http.StatusInternalServerError: + panic("Something bad happened on '" + testCase.description + "'") + } +} + +type ( + formTestCase struct { + description string + shouldSucceed bool + deepEqual bool + withInterface bool + queryString string + payload string + contentType string + expected interface{} + method string + } +) + +type defaultForm struct { + Default string `binding:"Default(hello world)"` +} + +func Test_Default(t *testing.T) { + Convey("Test default value", t, func() { + m := macaron.Classic() + m.Get("/", Bind(defaultForm{}), func(f defaultForm) { + So(f.Default, ShouldEqual, "hello world") + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + + m.ServeHTTP(resp, req) + }) +} diff --git a/vendor/github.com/go-macaron/binding/json_test.go b/vendor/github.com/go-macaron/binding/json_test.go new file mode 100755 index 0000000000..9ad9d873b3 --- /dev/null +++ b/vendor/github.com/go-macaron/binding/json_test.go @@ -0,0 +1,240 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +var jsonTestCases = []jsonTestCase{ + { + description: "Happy path", + shouldSucceedOnJson: true, + payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, + contentType: _JSON_CONTENT_TYPE, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Happy path with interface", + shouldSucceedOnJson: true, + withInterface: true, + payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, + contentType: _JSON_CONTENT_TYPE, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Nil payload", + shouldSucceedOnJson: false, + payload: `-nil-`, + contentType: _JSON_CONTENT_TYPE, + expected: Post{}, + }, + { + description: "Empty payload", + shouldSucceedOnJson: false, + payload: ``, + contentType: _JSON_CONTENT_TYPE, + expected: Post{}, + }, + { + description: "Empty content type", + shouldSucceedOnJson: true, + shouldFailOnBind: true, + payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, + contentType: ``, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Unsupported content type", + shouldSucceedOnJson: true, + shouldFailOnBind: true, + payload: `{"title": "Glorious Post Title", "content": "Lorem ipsum dolor sit amet"}`, + contentType: `BoGuS`, + expected: Post{Title: "Glorious Post Title", Content: "Lorem ipsum dolor sit amet"}, + }, + { + description: "Malformed JSON", + shouldSucceedOnJson: false, + payload: `{"title":"foo"`, + contentType: _JSON_CONTENT_TYPE, + expected: Post{}, + }, + { + description: "Deserialization with nested and embedded struct", + shouldSucceedOnJson: true, + payload: `{"title":"Glorious Post Title", "id":1, "author":{"name":"Matt Holt"}}`, + contentType: _JSON_CONTENT_TYPE, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Deserialization with nested and embedded struct with interface", + shouldSucceedOnJson: true, + withInterface: true, + payload: `{"title":"Glorious Post Title", "id":1, "author":{"name":"Matt Holt"}}`, + contentType: _JSON_CONTENT_TYPE, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Required nested struct field not specified", + shouldSucceedOnJson: false, + payload: `{"title":"Glorious Post Title", "id":1, "author":{}}`, + contentType: _JSON_CONTENT_TYPE, + expected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1}, + }, + { + description: "Required embedded struct field not specified", + shouldSucceedOnJson: false, + payload: `{"id":1, "author":{"name":"Matt Holt"}}`, + contentType: _JSON_CONTENT_TYPE, + expected: BlogPost{Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Slice of Posts", + shouldSucceedOnJson: true, + payload: `[{"title": "First Post"}, {"title": "Second Post"}]`, + contentType: _JSON_CONTENT_TYPE, + expected: []Post{Post{Title: "First Post"}, Post{Title: "Second Post"}}, + }, + { + description: "Slice of structs", + shouldSucceedOnJson: true, + payload: `{"name": "group1", "people": [{"name":"awoods"}, {"name": "anthony"}]}`, + contentType: _JSON_CONTENT_TYPE, + expected: Group{Name: "group1", People: []Person{Person{Name: "awoods"}, Person{Name: "anthony"}}}, + }, +} + +func Test_Json(t *testing.T) { + 
Convey("Test JSON", t, func() { + for _, testCase := range jsonTestCases { + performJsonTest(t, Json, testCase) + } + }) +} + +func performJsonTest(t *testing.T, binder handlerFunc, testCase jsonTestCase) { + var payload io.Reader + httpRecorder := httptest.NewRecorder() + m := macaron.Classic() + + jsonTestHandler := func(actual interface{}, errs Errors) { + if testCase.shouldSucceedOnJson && len(errs) > 0 { + So(len(errs), ShouldEqual, 0) + } else if !testCase.shouldSucceedOnJson && len(errs) == 0 { + So(len(errs), ShouldNotEqual, 0) + } + So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", testCase.expected)) + } + + switch testCase.expected.(type) { + case []Post: + if testCase.withInterface { + m.Post(testRoute, binder([]Post{}, (*modeler)(nil)), func(actual []Post, iface modeler, errs Errors) { + + for _, a := range actual { + So(a.Title, ShouldEqual, iface.Model()) + jsonTestHandler(a, errs) + } + }) + } else { + m.Post(testRoute, binder([]Post{}), func(actual []Post, errs Errors) { + jsonTestHandler(actual, errs) + }) + } + + case Post: + if testCase.withInterface { + m.Post(testRoute, binder(Post{}, (*modeler)(nil)), func(actual Post, iface modeler, errs Errors) { + So(actual.Title, ShouldEqual, iface.Model()) + jsonTestHandler(actual, errs) + }) + } else { + m.Post(testRoute, binder(Post{}), func(actual Post, errs Errors) { + jsonTestHandler(actual, errs) + }) + } + + case BlogPost: + if testCase.withInterface { + m.Post(testRoute, binder(BlogPost{}, (*modeler)(nil)), func(actual BlogPost, iface modeler, errs Errors) { + So(actual.Title, ShouldEqual, iface.Model()) + jsonTestHandler(actual, errs) + }) + } else { + m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { + jsonTestHandler(actual, errs) + }) + } + case Group: + if testCase.withInterface { + m.Post(testRoute, binder(Group{}, (*modeler)(nil)), func(actual Group, iface modeler, errs Errors) { + So(actual.Name, ShouldEqual, iface.Model()) + jsonTestHandler(actual, errs) + }) + } else { + m.Post(testRoute, binder(Group{}), func(actual Group, errs Errors) { + jsonTestHandler(actual, errs) + }) + } + } + + if testCase.payload == "-nil-" { + payload = nil + } else { + payload = strings.NewReader(testCase.payload) + } + + req, err := http.NewRequest("POST", testRoute, payload) + if err != nil { + panic(err) + } + req.Header.Set("Content-Type", testCase.contentType) + + m.ServeHTTP(httpRecorder, req) + + switch httpRecorder.Code { + case http.StatusNotFound: + panic("Routing is messed up in test fixture (got 404): check method and path") + case http.StatusInternalServerError: + panic("Something bad happened on '" + testCase.description + "'") + default: + if testCase.shouldSucceedOnJson && + httpRecorder.Code != http.StatusOK && + !testCase.shouldFailOnBind { + So(httpRecorder.Code, ShouldEqual, http.StatusOK) + } + } +} + +type ( + jsonTestCase struct { + description string + withInterface bool + shouldSucceedOnJson bool + shouldFailOnBind bool + payload string + contentType string + expected interface{} + } +) diff --git a/vendor/github.com/go-macaron/binding/misc_test.go b/vendor/github.com/go-macaron/binding/misc_test.go new file mode 100755 index 0000000000..2770cd14db --- /dev/null +++ b/vendor/github.com/go-macaron/binding/misc_test.go @@ -0,0 +1,123 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + . "github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +// When binding from Form data, testing the type of data to bind +// and converting a string into that type is tedious, so these tests +// cover all those cases. +func Test_SetWithProperType(t *testing.T) { + Convey("Set with proper type", t, func() { + testInputs := map[string]string{ + "successful": `integer=-1&integer8=-8&integer16=-16&integer32=-32&integer64=-64&uinteger=1&uinteger8=8&uinteger16=16&uinteger32=32&uinteger64=64&boolean_1=true&fl32_1=32.3232&fl64_1=-64.6464646464&str=string`, + "errorful": `integer=&integer8=asdf&integer16=--&integer32=&integer64=dsf&uinteger=&uinteger8=asdf&uinteger16=+&uinteger32= 32 &uinteger64=+%20+&boolean_1=&boolean_2=asdf&fl32_1=asdf&fl32_2=&fl64_1=&fl64_2=asdfstr`, + } + + expectedOutputs := map[string]Everything{ + "successful": Everything{ + Integer: -1, + Integer8: -8, + Integer16: -16, + Integer32: -32, + Integer64: -64, + Uinteger: 1, + Uinteger8: 8, + Uinteger16: 16, + Uinteger32: 32, + Uinteger64: 64, + Boolean_1: true, + Fl32_1: 32.3232, + Fl64_1: -64.6464646464, + Str: "string", + }, + "errorful": Everything{}, + } + + for key, testCase := range testInputs { + httpRecorder := httptest.NewRecorder() + m := macaron.Classic() + + m.Post(testRoute, Form(Everything{}), func(actual Everything, errs Errors) { + So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", expectedOutputs[key])) + if key == "errorful" { + So(errs, ShouldHaveLength, 10) + } else { + So(errs, ShouldHaveLength, 0) + } + }) + req, err := http.NewRequest("POST", testRoute, strings.NewReader(testCase)) + if err != nil { + panic(err) + } + req.Header.Set("Content-Type", formContentType) + m.ServeHTTP(httpRecorder, req) + } + }) +} + +// Each binder middleware should assert that the struct passed in is not +// a pointer (to avoid race conditions) +func Test_EnsureNotPointer(t *testing.T) { + Convey("Ensure field is not a pointer", t, func() { + shouldPanic := func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + ensureNotPointer(&Post{}) + } + + shouldNotPanic := func() { + defer func() { + So(recover(), ShouldBeNil) + }() + ensureNotPointer(Post{}) + } + + shouldPanic() + shouldNotPanic() + }) +} + +// Used in testing setWithProperType; kind of clunky... 
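+// Each field carries a form tag matching a key in the test payloads above, so every kind that setWithProperType converts gets exercised.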
+type Everything struct { + Integer int `form:"integer"` + Integer8 int8 `form:"integer8"` + Integer16 int16 `form:"integer16"` + Integer32 int32 `form:"integer32"` + Integer64 int64 `form:"integer64"` + Uinteger uint `form:"uinteger"` + Uinteger8 uint8 `form:"uinteger8"` + Uinteger16 uint16 `form:"uinteger16"` + Uinteger32 uint32 `form:"uinteger32"` + Uinteger64 uint64 `form:"uinteger64"` + Boolean_1 bool `form:"boolean_1"` + Boolean_2 bool `form:"boolean_2"` + Fl32_1 float32 `form:"fl32_1"` + Fl32_2 float32 `form:"fl32_2"` + Fl64_1 float64 `form:"fl64_1"` + Fl64_2 float64 `form:"fl64_2"` + Str string `form:"str"` +} diff --git a/vendor/github.com/go-macaron/binding/multipart_test.go b/vendor/github.com/go-macaron/binding/multipart_test.go new file mode 100755 index 0000000000..9b08058de6 --- /dev/null +++ b/vendor/github.com/go-macaron/binding/multipart_test.go @@ -0,0 +1,155 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "bytes" + "fmt" + "mime/multipart" + "net/http" + "net/http/httptest" + "strconv" + "testing" + + . "github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +var multipartFormTestCases = []multipartFormTestCase{ + { + description: "Happy multipart form path", + shouldSucceed: true, + inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "FormValue called before req.MultipartReader(); see https://github.com/martini-contrib/csrf/issues/6", + shouldSucceed: true, + callFormValueBefore: true, + inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Empty payload", + shouldSucceed: false, + inputAndExpected: BlogPost{}, + }, + { + description: "Missing required field (Id)", + shouldSucceed: false, + inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Required embedded struct field not specified", + shouldSucceed: false, + inputAndExpected: BlogPost{Id: 1, Author: Person{Name: "Matt Holt"}}, + }, + { + description: "Required nested struct field not specified", + shouldSucceed: false, + inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1}, + }, + { + description: "Multiple values", + shouldSucceed: true, + inputAndExpected: BlogPost{Post: Post{Title: "Glorious Post Title"}, Id: 1, Author: Person{Name: "Matt Holt"}, Ratings: []int{3, 5, 4}}, + }, + { + description: "Bad multipart encoding", + shouldSucceed: false, + malformEncoding: true, + }, +} + +func Test_MultipartForm(t *testing.T) { + Convey("Test multipart form", t, func() { + for _, testCase := range multipartFormTestCases { + performMultipartFormTest(t, MultipartForm, testCase) + } + }) +} + +func performMultipartFormTest(t *testing.T, binder handlerFunc, testCase multipartFormTestCase) { + httpRecorder := 
httptest.NewRecorder() + m := macaron.Classic() + + m.Post(testRoute, binder(BlogPost{}), func(actual BlogPost, errs Errors) { + if testCase.shouldSucceed && len(errs) > 0 { + So(len(errs), ShouldEqual, 0) + } else if !testCase.shouldSucceed && len(errs) == 0 { + So(len(errs), ShouldNotEqual, 0) + } + So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", testCase.inputAndExpected)) + }) + + multipartPayload, mpWriter := makeMultipartPayload(testCase) + + req, err := http.NewRequest("POST", testRoute, multipartPayload) + if err != nil { + panic(err) + } + + req.Header.Add("Content-Type", mpWriter.FormDataContentType()) + + err = mpWriter.Close() + if err != nil { + panic(err) + } + + if testCase.callFormValueBefore { + req.FormValue("foo") + } + + m.ServeHTTP(httpRecorder, req) + + switch httpRecorder.Code { + case http.StatusNotFound: + panic("Routing is messed up in test fixture (got 404): check methods and paths") + case http.StatusInternalServerError: + panic("Something bad happened on '" + testCase.description + "'") + } +} + +// Writes the input from a test case into a buffer using the multipart writer. +func makeMultipartPayload(testCase multipartFormTestCase) (*bytes.Buffer, *multipart.Writer) { + body := &bytes.Buffer{} + writer := multipart.NewWriter(body) + if testCase.malformEncoding { + // TODO: Break the multipart form parser which is apparently impervious!! + // (Get it to return an error. Trying to get 100% test coverage.) + body.Write([]byte(`--` + writer.Boundary() + `\nContent-Disposition: form-data; name="foo"\n\n--` + writer.Boundary() + `--`)) + return body, writer + } else { + writer.WriteField("title", testCase.inputAndExpected.Title) + writer.WriteField("content", testCase.inputAndExpected.Content) + writer.WriteField("id", strconv.Itoa(testCase.inputAndExpected.Id)) + writer.WriteField("ignored", testCase.inputAndExpected.Ignored) + for _, value := range testCase.inputAndExpected.Ratings { + writer.WriteField("rating", strconv.Itoa(value)) + } + writer.WriteField("name", testCase.inputAndExpected.Author.Name) + writer.WriteField("email", testCase.inputAndExpected.Author.Email) + return body, writer + } +} + +type ( + multipartFormTestCase struct { + description string + shouldSucceed bool + inputAndExpected BlogPost + malformEncoding bool + callFormValueBefore bool + } +) diff --git a/vendor/github.com/go-macaron/binding/validate_test.go b/vendor/github.com/go-macaron/binding/validate_test.go new file mode 100755 index 0000000000..55e6a4de85 --- /dev/null +++ b/vendor/github.com/go-macaron/binding/validate_test.go @@ -0,0 +1,412 @@ +// Copyright 2014 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package binding + +import ( + "fmt" + "net/http" + "net/http/httptest" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +var validationTestCases = []validationTestCase{ + { + description: "No errors", + data: BlogPost{ + Id: 1, + Post: Post{ + Title: "Behold The Title!", + Content: "And some content", + }, + Author: Person{ + Name: "Matt Holt", + }, + }, + expectedErrors: Errors{}, + }, + { + description: "ID required", + data: BlogPost{ + Post: Post{ + Title: "Behold The Title!", + Content: "And some content", + }, + Author: Person{ + Name: "Matt Holt", + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"id"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + }, + }, + { + description: "Embedded struct field required", + data: BlogPost{ + Id: 1, + Post: Post{ + Content: "Content given, but title is required", + }, + Author: Person{ + Name: "Matt Holt", + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"title"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + Error{ + FieldNames: []string{"title"}, + Classification: "LengthError", + Message: "Life is too short", + }, + }, + }, + { + description: "Nested struct field required", + data: BlogPost{ + Id: 1, + Post: Post{ + Title: "Behold The Title!", + Content: "And some content", + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"name"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + }, + }, + { + description: "Required field missing in nested struct pointer", + data: BlogPost{ + Id: 1, + Post: Post{ + Title: "Behold The Title!", + Content: "And some content", + }, + Author: Person{ + Name: "Matt Holt", + }, + Coauthor: &Person{}, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"name"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + }, + }, + { + description: "All required fields specified in nested struct pointer", + data: BlogPost{ + Id: 1, + Post: Post{ + Title: "Behold The Title!", + Content: "And some content", + }, + Author: Person{ + Name: "Matt Holt", + }, + Coauthor: &Person{ + Name: "Jeremy Saenz", + }, + }, + expectedErrors: Errors{}, + }, + { + description: "Custom validation should put an error", + data: BlogPost{ + Id: 1, + Post: Post{ + Title: "Too short", + Content: "And some content", + }, + Author: Person{ + Name: "Matt Holt", + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"title"}, + Classification: "LengthError", + Message: "Life is too short", + }, + }, + }, + { + description: "List Validation", + data: []BlogPost{ + BlogPost{ + Id: 1, + Post: Post{ + Title: "First Post", + Content: "And some content", + }, + Author: Person{ + Name: "Leeor Aharon", + }, + }, + BlogPost{ + Id: 2, + Post: Post{ + Title: "Second Post", + Content: "And some content", + }, + Author: Person{ + Name: "Leeor Aharon", + }, + }, + }, + expectedErrors: Errors{}, + }, + { + description: "List Validation w/ Errors", + data: []BlogPost{ + BlogPost{ + Id: 1, + Post: Post{ + Title: "First Post", + Content: "And some content", + }, + Author: Person{ + Name: "Leeor Aharon", + }, + }, + BlogPost{ + Id: 2, + Post: Post{ + Title: "Too Short", + Content: "And some content", + }, + Author: Person{ + Name: "Leeor Aharon", + }, + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"title"}, + Classification: "LengthError", + Message: "Life is too short", + }, + }, + }, + { + description: "List of invalid custom validations", + data: []SadForm{ + SadForm{ + AlphaDash: ",", + AlphaDashDot: ",", + Size: "123", + SizeSlice: []string{"1", "2", "3"}, + 
MinSize: ",", + MinSizeSlice: []string{",", ","}, + MaxSize: ",,", + MaxSizeSlice: []string{",", ","}, + Range: 3, + Email: ",", + Url: ",", + UrlEmpty: "", + InInvalid: "4", + NotIn: "1", + Include: "def", + Exclude: "abc", + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"AlphaDash"}, + Classification: "AlphaDashError", + Message: "AlphaDash", + }, + Error{ + FieldNames: []string{"AlphaDashDot"}, + Classification: "AlphaDashDot", + Message: "AlphaDashDot", + }, + Error{ + FieldNames: []string{"Size"}, + Classification: "Size", + Message: "Size", + }, + Error{ + FieldNames: []string{"Size"}, + Classification: "Size", + Message: "Size", + }, + Error{ + FieldNames: []string{"MinSize"}, + Classification: "MinSize", + Message: "MinSize", + }, + Error{ + FieldNames: []string{"MinSize"}, + Classification: "MinSize", + Message: "MinSize", + }, + Error{ + FieldNames: []string{"MaxSize"}, + Classification: "MaxSize", + Message: "MaxSize", + }, + Error{ + FieldNames: []string{"MaxSize"}, + Classification: "MaxSize", + Message: "MaxSize", + }, + Error{ + FieldNames: []string{"Range"}, + Classification: "Range", + Message: "Range", + }, + Error{ + FieldNames: []string{"Email"}, + Classification: "Email", + Message: "Email", + }, + Error{ + FieldNames: []string{"Url"}, + Classification: "Url", + Message: "Url", + }, + Error{ + FieldNames: []string{"Default"}, + Classification: "Default", + Message: "Default", + }, + Error{ + FieldNames: []string{"InInvalid"}, + Classification: "In", + Message: "In", + }, + Error{ + FieldNames: []string{"NotIn"}, + Classification: "NotIn", + Message: "NotIn", + }, + Error{ + FieldNames: []string{"Include"}, + Classification: "Include", + Message: "Include", + }, + Error{ + FieldNames: []string{"Exclude"}, + Classification: "Exclude", + Message: "Exclude", + }, + }, + }, + { + description: "List of valid custom validations", + data: []SadForm{ + SadForm{ + AlphaDash: "123-456", + AlphaDashDot: "123.456", + Size: "1", + SizeSlice: []string{"1"}, + MinSize: "12345", + MinSizeSlice: []string{"1", "2", "3", "4", "5"}, + MaxSize: "1", + MaxSizeSlice: []string{"1"}, + Range: 2, + In: "1", + InInvalid: "1", + Email: "123@456.com", + Url: "http://123.456", + Include: "abc", + }, + }, + }, + { + description: "slice of structs Validation", + data: Group{ + Name: "group1", + People: []Person{ + Person{Name: "anthony"}, + Person{Name: "awoods"}, + }, + }, + expectedErrors: Errors{}, + }, + { + description: "slice of structs Validation failer", + data: Group{ + Name: "group1", + People: []Person{ + Person{Name: "anthony"}, + Person{Name: ""}, + }, + }, + expectedErrors: Errors{ + Error{ + FieldNames: []string{"name"}, + Classification: ERR_REQUIRED, + Message: "Required", + }, + }, + }, +} + +func Test_Validation(t *testing.T) { + Convey("Test validation", t, func() { + for _, testCase := range validationTestCases { + performValidationTest(t, testCase) + } + }) +} + +func performValidationTest(t *testing.T, testCase validationTestCase) { + httpRecorder := httptest.NewRecorder() + m := macaron.Classic() + + m.Post(testRoute, Validate(testCase.data), func(actual Errors) { + So(fmt.Sprintf("%+v", actual), ShouldEqual, fmt.Sprintf("%+v", testCase.expectedErrors)) + }) + + req, err := http.NewRequest("POST", testRoute, nil) + if err != nil { + panic(err) + } + + m.ServeHTTP(httpRecorder, req) + + switch httpRecorder.Code { + case http.StatusNotFound: + panic("Routing is messed up in test fixture (got 404): check methods and paths") + case 
http.StatusInternalServerError: + panic("Something bad happened on '" + testCase.description + "'") + } +} + +type ( + validationTestCase struct { + description string + data interface{} + expectedErrors Errors + } +) diff --git a/vendor/github.com/go-macaron/inject/.travis.yml b/vendor/github.com/go-macaron/inject/.travis.yml new file mode 100644 index 0000000000..2774fb35d5 --- /dev/null +++ b/vendor/github.com/go-macaron/inject/.travis.yml @@ -0,0 +1,14 @@ +sudo: false +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/vendor/github.com/go-macaron/inject/inject_test.go b/vendor/github.com/go-macaron/inject/inject_test.go new file mode 100644 index 0000000000..7d288d74d6 --- /dev/null +++ b/vendor/github.com/go-macaron/inject/inject_test.go @@ -0,0 +1,285 @@ +// Copyright 2013 Jeremy Saenz +// Copyright 2015 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package inject_test + +import ( + "fmt" + "reflect" + "testing" + + "github.com/go-macaron/inject" + . "github.com/smartystreets/goconvey/convey" +) + +type SpecialString interface { +} + +type TestStruct struct { + Dep1 string `inject:"t" json:"-"` + Dep2 SpecialString `inject` + Dep3 string +} + +type Greeter struct { + Name string +} + +func (g *Greeter) String() string { + return "Hello, my name is " + g.Name +} + +type myFastInvoker func(string) + +func (f myFastInvoker) Invoke([]interface{}) ([]reflect.Value, error) { + return nil, nil +} + +func Test_Injector_Invoke(t *testing.T) { + Convey("Invokes function", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + dep := "some dependency" + injector.Map(dep) + dep2 := "another dep" + injector.MapTo(dep2, (*SpecialString)(nil)) + dep3 := make(chan *SpecialString) + dep4 := make(chan *SpecialString) + typRecv := reflect.ChanOf(reflect.RecvDir, reflect.TypeOf(dep3).Elem()) + typSend := reflect.ChanOf(reflect.SendDir, reflect.TypeOf(dep4).Elem()) + injector.Set(typRecv, reflect.ValueOf(dep3)) + injector.Set(typSend, reflect.ValueOf(dep4)) + + _, err := injector.Invoke(func(d1 string, d2 SpecialString, d3 <-chan *SpecialString, d4 chan<- *SpecialString) { + So(d1, ShouldEqual, dep) + So(d2, ShouldEqual, dep2) + So(reflect.TypeOf(d3).Elem(), ShouldEqual, reflect.TypeOf(dep3).Elem()) + So(reflect.TypeOf(d4).Elem(), ShouldEqual, reflect.TypeOf(dep4).Elem()) + So(reflect.TypeOf(d3).ChanDir(), ShouldEqual, reflect.RecvDir) + So(reflect.TypeOf(d4).ChanDir(), ShouldEqual, reflect.SendDir) + }) + So(err, ShouldBeNil) + + _, err = injector.Invoke(myFastInvoker(func(string) {})) + So(err, ShouldBeNil) + }) + + Convey("Invokes function with return value", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + dep := "some dependency" + injector.Map(dep) + dep2 := "another dep" + injector.MapTo(dep2, (*SpecialString)(nil)) + + result, err := injector.Invoke(func(d1 string, d2 SpecialString) string { + So(d1, ShouldEqual, dep) + So(d2,
ShouldEqual, dep2) + return "Hello world" + }) + + So(result[0].String(), ShouldEqual, "Hello world") + So(err, ShouldBeNil) + }) +} + +func Test_Injector_Apply(t *testing.T) { + Convey("Apply a type", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + injector.Map("a dep").MapTo("another dep", (*SpecialString)(nil)) + + s := TestStruct{} + So(injector.Apply(&s), ShouldBeNil) + + So(s.Dep1, ShouldEqual, "a dep") + So(s.Dep2, ShouldEqual, "another dep") + }) +} + +func Test_Injector_InterfaceOf(t *testing.T) { + Convey("Check interface of a type", t, func() { + iType := inject.InterfaceOf((*SpecialString)(nil)) + So(iType.Kind(), ShouldEqual, reflect.Interface) + + iType = inject.InterfaceOf((**SpecialString)(nil)) + So(iType.Kind(), ShouldEqual, reflect.Interface) + + defer func() { + So(recover(), ShouldNotBeNil) + }() + iType = inject.InterfaceOf((*testing.T)(nil)) + }) +} + +func Test_Injector_Set(t *testing.T) { + Convey("Set and get type", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + typ := reflect.TypeOf("string") + typSend := reflect.ChanOf(reflect.SendDir, typ) + typRecv := reflect.ChanOf(reflect.RecvDir, typ) + + // instantiating unidirectional channels is not possible using reflect + // http://golang.org/src/pkg/reflect/value.go?s=60463:60504#L2064 + chanRecv := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0) + chanSend := reflect.MakeChan(reflect.ChanOf(reflect.BothDir, typ), 0) + + injector.Set(typSend, chanSend) + injector.Set(typRecv, chanRecv) + + So(injector.GetVal(typSend).IsValid(), ShouldBeTrue) + So(injector.GetVal(typRecv).IsValid(), ShouldBeTrue) + So(injector.GetVal(chanSend.Type()).IsValid(), ShouldBeFalse) + }) +} + +func Test_Injector_GetVal(t *testing.T) { + Convey("Map and get type", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + injector.Map("some dependency") + + So(injector.GetVal(reflect.TypeOf("string")).IsValid(), ShouldBeTrue) + So(injector.GetVal(reflect.TypeOf(11)).IsValid(), ShouldBeFalse) + }) +} + +func Test_Injector_SetParent(t *testing.T) { + Convey("Set parent of injector", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + injector.MapTo("another dep", (*SpecialString)(nil)) + + injector2 := inject.New() + So(injector2, ShouldNotBeNil) + + injector2.SetParent(injector) + + So(injector2.GetVal(inject.InterfaceOf((*SpecialString)(nil))).IsValid(), ShouldBeTrue) + }) +} + +func Test_Injector_Implementors(t *testing.T) { + Convey("Check implementors", t, func() { + injector := inject.New() + So(injector, ShouldNotBeNil) + + g := &Greeter{"Jeremy"} + injector.Map(g) + + So(injector.GetVal(inject.InterfaceOf((*fmt.Stringer)(nil))).IsValid(), ShouldBeTrue) + }) +} + +func Test_FastInvoker(t *testing.T) { + Convey("Check fast invoker", t, func() { + So(inject.IsFastInvoker(myFastInvoker(nil)), ShouldBeTrue) + }) +} + +//----------Benchmark InjectorInvoke------------- + +func f1InjectorInvoke(d1 string, d2 SpecialString) string { + return "f1" +} +func f2InjectorInvoke(d1 string, d2 SpecialString) string { + return "f2" +} + +func f1SimpleInjectorInvoke() { +} +func f2SimpleInjectorInvoke() { +} + +// f2InjectorInvokeHandler f2 Invoke Handler +type f2InjectorInvokeHandler func(d1 string, d2 SpecialString) string + +func (f2 f2InjectorInvokeHandler) Invoke(p []interface{}) ([]reflect.Value, error) { + ret := f2(p[0].(string), p[1].(SpecialString)) + return []reflect.Value{reflect.ValueOf(ret)}, nil +} + +type f2SimpleInjectorInvokeHandler func() +
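+// Invoke implements inject's FastInvoker fast path for a no-argument handler: it calls f2 directly instead of going through reflection.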
+func (f2 f2SimpleInjectorInvokeHandler) Invoke(p []interface{}) ([]reflect.Value, error) { + f2() + return nil, nil +} + +func BenchmarkInjectorInvokeNative(b *testing.B) { + b.StopTimer() + dep := "some dependency" + dep2 := "another dep" + var d2 SpecialString + d2 = dep2 + + b.StartTimer() + for i := 0; i < b.N; i++ { + f1InjectorInvoke(dep, d2) + } +} + +func BenchmarkInjectorInvokeOriginal(b *testing.B) { + benchmarkInjectorInvoke(b, false, false) +} + +func BenchmarkInjectorInvokeFast(b *testing.B) { + benchmarkInjectorInvoke(b, true, false) +} + +func BenchmarkInjectorInvokeOriginalSimple(b *testing.B) { + benchmarkInjectorInvoke(b, false, true) +} + +func BenchmarkInjectorInvokeFastSimple(b *testing.B) { + benchmarkInjectorInvoke(b, true, true) +} + +func benchmarkInjectorInvoke(b *testing.B, isFast, isSimple bool) { + b.StopTimer() + + injector := inject.New() + dep := "some dependency" + injector.Map(dep) + dep2 := "another dep" + injector.MapTo(dep2, (*SpecialString)(nil)) + + var f1, f2 interface{} + if isSimple { //func() + f1 = f1SimpleInjectorInvoke + f2 = f2SimpleInjectorInvokeHandler(f2SimpleInjectorInvoke) + } else { //func(p1, p2) ret + f1 = f1InjectorInvoke + f2 = f2InjectorInvokeHandler(f2InjectorInvoke) + } + injector.Invoke(f1) + injector.Invoke(f2) + + b.StartTimer() + for i := 0; i < b.N; i++ { + if isFast { + injector.Invoke(f2) + } else { + injector.Invoke(f1) + } + } +} diff --git a/vendor/github.com/gocql/gocql/.gitignore b/vendor/github.com/gocql/gocql/.gitignore new file mode 100644 index 0000000000..bce6cf584a --- /dev/null +++ b/vendor/github.com/gocql/gocql/.gitignore @@ -0,0 +1,5 @@ +gocql-fuzz +fuzz-corpus +fuzz-work +gocql.test +.idea diff --git a/vendor/github.com/gocql/gocql/.travis.yml b/vendor/github.com/gocql/gocql/.travis.yml new file mode 100644 index 0000000000..d75ed13776 --- /dev/null +++ b/vendor/github.com/gocql/gocql/.travis.yml @@ -0,0 +1,45 @@ +language: go + +sudo: required +dist: trusty + +cache: + directories: + - $HOME/.ccm/repository + - $HOME/.local/lib/python2.7 + +matrix: + fast_finish: true + +env: + global: + - GOMAXPROCS=2 + matrix: + - CASS=2.1.12 + AUTH=false + - CASS=2.2.5 + AUTH=true + - CASS=2.2.5 + AUTH=false + - CASS=3.0.8 + AUTH=false + +go: + - 1.7 + - 1.8 + +install: + - pip install --user cql PyYAML six + - git clone https://github.com/pcmanus/ccm.git + - pushd ccm + - ./setup.py install --user + - popd + - go get . + +script: + - set -e + - PATH=$PATH:$HOME/.local/bin bash integration.sh $CASS $AUTH + - go vet . 
+ +notifications: + - email: false diff --git a/vendor/github.com/gocql/gocql/address_translators_test.go b/vendor/github.com/gocql/gocql/address_translators_test.go new file mode 100644 index 0000000000..da27015395 --- /dev/null +++ b/vendor/github.com/gocql/gocql/address_translators_test.go @@ -0,0 +1,34 @@ +package gocql + +import ( + "net" + "testing" +) + +func TestIdentityAddressTranslator_NilAddrAndZeroPort(t *testing.T) { + var tr AddressTranslator = IdentityTranslator() + hostIP := net.ParseIP("") + if hostIP != nil { + t.Errorf("expected host ip to be (nil) but was (%+v) instead", hostIP) + } + + addr, port := tr.Translate(hostIP, 0) + if addr != nil { + t.Errorf("expected translated host to be (nil) but was (%+v) instead", addr) + } + assertEqual(t, "translated port", 0, port) +} + +func TestIdentityAddressTranslator_HostProvided(t *testing.T) { + var tr AddressTranslator = IdentityTranslator() + hostIP := net.ParseIP("10.1.2.3") + if hostIP == nil { + t.Error("expected host ip not to be (nil)") + } + + addr, port := tr.Translate(hostIP, 9042) + if !hostIP.Equal(addr) { + t.Errorf("expected translated addr to be (%+v) but was (%+v) instead", hostIP, addr) + } + assertEqual(t, "translated port", 9042, port) +} diff --git a/vendor/github.com/gocql/gocql/batch_test.go b/vendor/github.com/gocql/gocql/batch_test.go new file mode 100644 index 0000000000..257ced7d24 --- /dev/null +++ b/vendor/github.com/gocql/gocql/batch_test.go @@ -0,0 +1,58 @@ +// +build all integration + +package gocql + +import ( + "testing" + "time" +) + +func TestBatch_Errors(t *testing.T) { + if *flagProto == 1 { + t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") + } + + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.batch_errors (id int primary key, val inet)`); err != nil { + t.Fatal(err) + } + + b := session.NewBatch(LoggedBatch) + b.Query("SELECT * FROM batch_errors WHERE id=2 AND val=?", nil) + if err := session.ExecuteBatch(b); err == nil { + t.Fatal("expected to get error for invalid query in batch") + } +} + +func TestBatch_WithTimestamp(t *testing.T) { + if *flagProto < protoVersion3 { + t.Skip("Batch timestamps are only available on protocol >= 3") + } + + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.batch_ts (id int primary key, val text)`); err != nil { + t.Fatal(err) + } + + micros := time.Now().UnixNano()/1e3 - 1000 + + b := session.NewBatch(LoggedBatch) + b.WithTimestamp(micros) + b.Query("INSERT INTO batch_ts (id, val) VALUES (?, ?)", 1, "val") + if err := session.ExecuteBatch(b); err != nil { + t.Fatal(err) + } + + var storedTs int64 + if err := session.Query(`SELECT writetime(val) FROM batch_ts WHERE id = ?`, 1).Scan(&storedTs); err != nil { + t.Fatal(err) + } + + if storedTs != micros { + t.Errorf("got ts %d, expected %d", storedTs, micros) + } +} diff --git a/vendor/github.com/gocql/gocql/cass1batch_test.go b/vendor/github.com/gocql/gocql/cass1batch_test.go new file mode 100644 index 0000000000..f5f5c619d9 --- /dev/null +++ b/vendor/github.com/gocql/gocql/cass1batch_test.go @@ -0,0 +1,60 @@ +// +build all integration + +package gocql + +import ( + "strings" + "testing" +) + +func TestProto1BatchInsert(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, "CREATE TABLE gocql_test.large (id int primary key)"); err != nil { + t.Fatal(err) + } + + begin := "BEGIN BATCH" + end := "APPLY BATCH" + 
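// protocol v1 has no batch frame, so the whole batch travels as a single BEGIN BATCH .. APPLY BATCH statement +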
query := "INSERT INTO large (id) VALUES (?)" + fullQuery := strings.Join([]string{begin, query, end}, "\n") + args := []interface{}{5} + if err := session.Query(fullQuery, args...).Consistency(Quorum).Exec(); err != nil { + t.Fatal(err) + } +} + +func TestShouldPrepareFunction(t *testing.T) { + var shouldPrepareTests = []struct { + Stmt string + Result bool + }{ + {` + BEGIN BATCH + INSERT INTO users (userID, password) + VALUES ('smith', 'secret') + APPLY BATCH + ; + `, true}, + {`INSERT INTO users (userID, password, name) VALUES ('user2', 'ch@ngem3b', 'second user')`, true}, + {`BEGIN COUNTER BATCH UPDATE stats SET views = views + 1 WHERE pageid = 1 APPLY BATCH`, true}, + {`delete name from users where userID = 'smith';`, true}, + {` UPDATE users SET password = 'secret' WHERE userID = 'smith' `, true}, + {`CREATE TABLE users ( + user_name varchar PRIMARY KEY, + password varchar, + gender varchar, + session_token varchar, + state varchar, + birth_year bigint + );`, false}, + } + + for _, test := range shouldPrepareTests { + q := &Query{stmt: test.Stmt} + if got := q.shouldPrepare(); got != test.Result { + t.Fatalf("%q: got %v, expected %v\n", test.Stmt, got, test.Result) + } + } +} diff --git a/vendor/github.com/gocql/gocql/cassandra_test.go b/vendor/github.com/gocql/gocql/cassandra_test.go new file mode 100644 index 0000000000..cf54d434ca --- /dev/null +++ b/vendor/github.com/gocql/gocql/cassandra_test.go @@ -0,0 +1,2697 @@ +// +build all integration + +package gocql + +import ( + "bytes" + "context" + "io" + "math" + "math/big" + "net" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + "unicode" + + "gopkg.in/inf.v0" +) + +// TestAuthentication verifies that gocql will work with a host configured to only accept authenticated connections +func TestAuthentication(t *testing.T) { + + if *flagProto < 2 { + t.Skip("Authentication is not supported with protocol < 2") + } + + if !*flagRunAuthTest { + t.Skip("Authentication is not configured in the target cluster") + } + + cluster := createCluster() + + cluster.Authenticator = PasswordAuthenticator{ + Username: "cassandra", + Password: "cassandra", + } + + session, err := cluster.CreateSession() + + if err != nil { + t.Fatalf("Authentication error: %s", err) + } + + session.Close() +} + +//TestRingDiscovery makes sure that you can autodiscover other cluster members when you seed a cluster config with just one node +func TestRingDiscovery(t *testing.T) { + cluster := createCluster() + cluster.Hosts = clusterHosts[:1] + + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if *clusterSize > 1 { + // wait for autodiscovery to update the pool with the list of known hosts + time.Sleep(*flagAutoWait) + } + + session.pool.mu.RLock() + defer session.pool.mu.RUnlock() + size := len(session.pool.hostConnPools) + + if *clusterSize != size { + for p, pool := range session.pool.hostConnPools { + t.Logf("p=%q host=%v ips=%s", p, pool.host, pool.host.ConnectAddress().String()) + + } + t.Errorf("Expected a cluster size of %d, but actual size was %d", *clusterSize, size) + } +} + +func TestEmptyHosts(t *testing.T) { + cluster := createCluster() + cluster.Hosts = nil + if session, err := cluster.CreateSession(); err == nil { + session.Close() + t.Error("expected err, got nil") + } +} + +func TestInvalidPeerEntry(t *testing.T) { + session := createSession(t) + + // rack, release_version, schema_version, tokens are all null + query := session.Query("INSERT into system.peers (peer, data_center, host_id, rpc_address) VALUES 
(?, ?, ?, ?)", + "169.254.235.45", + "datacenter1", + "35c0ec48-5109-40fd-9281-9e9d4add2f1e", + "169.254.235.45", + ) + + if err := query.Exec(); err != nil { + t.Fatal(err) + } + + session.Close() + + cluster := createCluster() + cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy()) + session = createSessionFromCluster(cluster, t) + defer func() { + session.Query("DELETE from system.peers where peer = ?", "169.254.235.45").Exec() + session.Close() + }() + + // check we can perform a query + iter := session.Query("select peer from system.peers").Iter() + var peer string + for iter.Scan(&peer) { + } + if err := iter.Close(); err != nil { + t.Fatal(err) + } +} + +//TestUseStatementError checks to make sure the correct error is returned when the user tries to execute a use statement. +func TestUseStatementError(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := session.Query("USE gocql_test").Exec(); err != nil { + if err != ErrUseStmt { + t.Fatalf("expected ErrUseStmt, got " + err.Error()) + } + } else { + t.Fatal("expected err, got nil.") + } +} + +//TestInvalidKeyspace checks that an invalid keyspace will return promptly and without a flood of connections +func TestInvalidKeyspace(t *testing.T) { + cluster := createCluster() + cluster.Keyspace = "invalidKeyspace" + session, err := cluster.CreateSession() + if err != nil { + if err != ErrNoConnectionsStarted { + t.Fatalf("Expected ErrNoConnections but got %v", err) + } + } else { + session.Close() //Clean up the session + t.Fatal("expected err, got nil.") + } +} + +func TestTracing(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.trace (id int primary key)`); err != nil { + t.Fatal("create:", err) + } + + buf := &bytes.Buffer{} + trace := NewTraceWriter(session, buf) + + if err := session.Query(`INSERT INTO trace (id) VALUES (?)`, 42).Trace(trace).Exec(); err != nil { + t.Fatal("insert:", err) + } else if buf.Len() == 0 { + t.Fatal("insert: failed to obtain any tracing") + } + buf.Reset() + + var value int + if err := session.Query(`SELECT id FROM trace WHERE id = ?`, 42).Trace(trace).Scan(&value); err != nil { + t.Fatal("select:", err) + } else if value != 42 { + t.Fatalf("value: expected %d, got %d", 42, value) + } else if buf.Len() == 0 { + t.Fatal("select: failed to obtain any tracing") + } + + // also works from session tracer + session.SetTrace(trace) + buf.Reset() + if err := session.Query(`SELECT id FROM trace WHERE id = ?`, 42).Scan(&value); err != nil { + t.Fatal("select:", err) + } + if buf.Len() == 0 { + t.Fatal("select: failed to obtain any tracing") + } +} + +func TestPaging(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("Paging not supported. 
Please use Cassandra >= 2.0") + } + + if err := createTable(session, "CREATE TABLE gocql_test.paging (id int primary key)"); err != nil { + t.Fatal("create table:", err) + } + for i := 0; i < 100; i++ { + if err := session.Query("INSERT INTO paging (id) VALUES (?)", i).Exec(); err != nil { + t.Fatal("insert:", err) + } + } + + iter := session.Query("SELECT id FROM paging").PageSize(10).Iter() + var id int + count := 0 + for iter.Scan(&id) { + count++ + } + if err := iter.Close(); err != nil { + t.Fatal("close:", err) + } + if count != 100 { + t.Fatalf("expected %d, got %d", 100, count) + } +} + +func TestCAS(t *testing.T) { + cluster := createCluster() + cluster.SerialConsistency = LocalSerial + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("lightweight transactions not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.cas_table ( + title varchar, + revid timeuuid, + last_modified timestamp, + PRIMARY KEY (title, revid) + )`); err != nil { + t.Fatal("create:", err) + } + + title, revid, modified := "baz", TimeUUID(), time.Now() + var titleCAS string + var revidCAS UUID + var modifiedCAS time.Time + + if applied, err := session.Query(`INSERT INTO cas_table (title, revid, last_modified) + VALUES (?, ?, ?) IF NOT EXISTS`, + title, revid, modified).ScanCAS(&titleCAS, &revidCAS, &modifiedCAS); err != nil { + t.Fatal("insert:", err) + } else if !applied { + t.Fatal("insert should have been applied") + } + + if applied, err := session.Query(`INSERT INTO cas_table (title, revid, last_modified) + VALUES (?, ?, ?) IF NOT EXISTS`, + title, revid, modified).ScanCAS(&titleCAS, &revidCAS, &modifiedCAS); err != nil { + t.Fatal("insert:", err) + } else if applied { + t.Fatal("insert should not have been applied") + } else if title != titleCAS || revid != revidCAS { + t.Fatalf("expected %s/%v/%v but got %s/%v/%v", title, revid, modified, titleCAS, revidCAS, modifiedCAS) + } + + tenSecondsLater := modified.Add(10 * time.Second) + + if applied, err := session.Query(`DELETE FROM cas_table WHERE title = ? and revid = ? IF last_modified = ?`, + title, revid, tenSecondsLater).ScanCAS(&modifiedCAS); err != nil { + t.Fatal("delete:", err) + } else if applied { + t.Fatal("delete should have not been applied") + } + + if modifiedCAS.Unix() != tenSecondsLater.Add(-10*time.Second).Unix() { + t.Fatalf("Was expecting modified CAS to be %v; but was one second later", modifiedCAS.UTC()) + } + + if _, err := session.Query(`DELETE FROM cas_table WHERE title = ? and revid = ? IF last_modified = ?`, + title, revid, tenSecondsLater).ScanCAS(); !strings.HasPrefix(err.Error(), "gocql: not enough columns to scan into") { + t.Fatalf("delete: was expecting count mismatch error but got: %q", err.Error()) + } + + if applied, err := session.Query(`DELETE FROM cas_table WHERE title = ? and revid = ? IF last_modified = ?`, + title, revid, modified).ScanCAS(&modifiedCAS); err != nil { + t.Fatal("delete:", err) + } else if !applied { + t.Fatal("delete should have been applied") + } + + if err := session.Query(`TRUNCATE cas_table`).Exec(); err != nil { + t.Fatal("truncate:", err) + } + + successBatch := session.NewBatch(LoggedBatch) + successBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES (?, ?, ?) 
IF NOT EXISTS", title, revid, modified) + if applied, _, err := session.ExecuteBatchCAS(successBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil { + t.Fatal("insert:", err) + } else if !applied { + t.Fatalf("insert should have been applied: title=%v revID=%v modified=%v", titleCAS, revidCAS, modifiedCAS) + } + + successBatch = session.NewBatch(LoggedBatch) + successBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS", title+"_foo", revid, modified) + casMap := make(map[string]interface{}) + if applied, _, err := session.MapExecuteBatchCAS(successBatch, casMap); err != nil { + t.Fatal("insert:", err) + } else if !applied { + t.Fatal("insert should have been applied") + } + + failBatch := session.NewBatch(LoggedBatch) + failBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES (?, ?, ?) IF NOT EXISTS", title, revid, modified) + if applied, _, err := session.ExecuteBatchCAS(successBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil { + t.Fatal("insert:", err) + } else if applied { + t.Fatalf("insert should have been applied: title=%v revID=%v modified=%v", titleCAS, revidCAS, modifiedCAS) + } + + insertBatch := session.NewBatch(LoggedBatch) + insertBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES ('_foo', 2c3af400-73a4-11e5-9381-29463d90c3f0, DATEOF(NOW()))") + insertBatch.Query("INSERT INTO cas_table (title, revid, last_modified) VALUES ('_foo', 3e4ad2f1-73a4-11e5-9381-29463d90c3f0, DATEOF(NOW()))") + if err := session.ExecuteBatch(insertBatch); err != nil { + t.Fatal("insert:", err) + } + + failBatch = session.NewBatch(LoggedBatch) + failBatch.Query("UPDATE cas_table SET last_modified = DATEOF(NOW()) WHERE title='_foo' AND revid=2c3af400-73a4-11e5-9381-29463d90c3f0 IF last_modified=DATEOF(NOW());") + failBatch.Query("UPDATE cas_table SET last_modified = DATEOF(NOW()) WHERE title='_foo' AND revid=3e4ad2f1-73a4-11e5-9381-29463d90c3f0 IF last_modified=DATEOF(NOW());") + if applied, iter, err := session.ExecuteBatchCAS(failBatch, &titleCAS, &revidCAS, &modifiedCAS); err != nil { + t.Fatal("insert:", err) + } else if applied { + t.Fatalf("insert should have been applied: title=%v revID=%v modified=%v", titleCAS, revidCAS, modifiedCAS) + } else { + if scan := iter.Scan(&applied, &titleCAS, &revidCAS, &modifiedCAS); scan && applied { + t.Fatalf("insert should have been applied: title=%v revID=%v modified=%v", titleCAS, revidCAS, modifiedCAS) + } else if !scan { + t.Fatal("should have scanned another row") + } + if err := iter.Close(); err != nil { + t.Fatal("scan:", err) + } + } +} + +func TestMapScanCAS(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("lightweight transactions not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.cas_table2 ( + title varchar, + revid timeuuid, + last_modified timestamp, + deleted boolean, + PRIMARY KEY (title, revid) + )`); err != nil { + t.Fatal("create:", err) + } + + title, revid, modified, deleted := "baz", TimeUUID(), time.Now(), false + mapCAS := map[string]interface{}{} + + if applied, err := session.Query(`INSERT INTO cas_table2 (title, revid, last_modified, deleted) + VALUES (?, ?, ?, ?) 
IF NOT EXISTS`, + title, revid, modified, deleted).MapScanCAS(mapCAS); err != nil { + t.Fatal("insert:", err) + } else if !applied { + t.Fatalf("insert should have been applied: title=%v revID=%v modified=%v", title, revid, modified) + } + + mapCAS = map[string]interface{}{} + if applied, err := session.Query(`INSERT INTO cas_table2 (title, revid, last_modified, deleted) + VALUES (?, ?, ?, ?) IF NOT EXISTS`, + title, revid, modified, deleted).MapScanCAS(mapCAS); err != nil { + t.Fatal("insert:", err) + } else if applied { + t.Fatalf("insert should not have been applied: title=%v revID=%v modified=%v", title, revid, modified) + } else if title != mapCAS["title"] || revid != mapCAS["revid"] || deleted != mapCAS["deleted"] { + t.Fatalf("expected %s/%v/%v/%v but got %s/%v/%v/%v", title, revid, modified, false, mapCAS["title"], mapCAS["revid"], mapCAS["last_modified"], mapCAS["deleted"]) + } + +} + +func TestBatch(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.batch_table (id int primary key)`); err != nil { + t.Fatal("create table:", err) + } + + batch := NewBatch(LoggedBatch) + for i := 0; i < 100; i++ { + batch.Query(`INSERT INTO batch_table (id) VALUES (?)`, i) + } + + if err := session.ExecuteBatch(batch); err != nil { + t.Fatal("execute batch:", err) + } + + count := 0 + if err := session.Query(`SELECT COUNT(*) FROM batch_table`).Scan(&count); err != nil { + t.Fatal("select count:", err) + } else if count != 100 { + t.Fatalf("count: expected %d, got %d\n", 100, count) + } +} + +func TestUnpreparedBatch(t *testing.T) { + t.Skip("FLAKE skipping") + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.batch_unprepared (id int primary key, c counter)`); err != nil { + t.Fatal("create table:", err) + } + + var batch *Batch + if session.cfg.ProtoVersion == 2 { + batch = NewBatch(CounterBatch) + } else { + batch = NewBatch(UnloggedBatch) + } + + for i := 0; i < 100; i++ { + batch.Query(`UPDATE batch_unprepared SET c = c + 1 WHERE id = 1`) + } + + if err := session.ExecuteBatch(batch); err != nil { + t.Fatal("execute batch:", err) + } + + count := 0 + if err := session.Query(`SELECT COUNT(*) FROM batch_unprepared`).Scan(&count); err != nil { + t.Fatal("select count:", err) + } else if count != 1 { + t.Fatalf("count: expected %d, got %d\n", 1, count) + } + + if err := session.Query(`SELECT c FROM batch_unprepared`).Scan(&count); err != nil { + t.Fatal("select count:", err) + } else if count != 100 { + t.Fatalf("count: expected %d, got %d\n", 100, count) + } +} + +// TestBatchLimit tests gocql to make sure batch operations larger than the maximum +// statement limit are not submitted to a cassandra node. +func TestBatchLimit(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported.
Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.batch_table2 (id int primary key)`); err != nil { + t.Fatal("create table:", err) + } + + batch := NewBatch(LoggedBatch) + for i := 0; i < 65537; i++ { + batch.Query(`INSERT INTO batch_table2 (id) VALUES (?)`, i) + } + if err := session.ExecuteBatch(batch); err != ErrTooManyStmts { + t.Fatal("gocql attempted to execute a batch larger than the support limit of statements.") + } + +} + +func TestWhereIn(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.where_in_table (id int, cluster int, primary key (id,cluster))`); err != nil { + t.Fatal("create table:", err) + } + + if err := session.Query("INSERT INTO where_in_table (id, cluster) VALUES (?,?)", 100, 200).Exec(); err != nil { + t.Fatal("insert:", err) + } + + iter := session.Query("SELECT * FROM where_in_table WHERE id = ? AND cluster IN (?)", 100, 200).Iter() + var id, cluster int + count := 0 + for iter.Scan(&id, &cluster) { + count++ + } + + if id != 100 || cluster != 200 { + t.Fatalf("Was expecting id and cluster to be (100,200) but were (%d,%d)", id, cluster) + } +} + +// TestTooManyQueryArgs tests to make sure the library correctly handles the application level bug +// whereby too many query arguments are passed to a query +func TestTooManyQueryArgs(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.too_many_query_args (id int primary key, value int)`); err != nil { + t.Fatal("create table:", err) + } + + _, err := session.Query(`SELECT * FROM too_many_query_args WHERE id = ?`, 1, 2).Iter().SliceMap() + + if err == nil { + t.Fatal("'`SELECT * FROM too_many_query_args WHERE id = ?`, 1, 2' should return an error") + } + + batch := session.NewBatch(UnloggedBatch) + batch.Query("INSERT INTO too_many_query_args (id, value) VALUES (?, ?)", 1, 2, 3) + err = session.ExecuteBatch(batch) + + if err == nil { + t.Fatal("'`INSERT INTO too_many_query_args (id, value) VALUES (?, ?)`, 1, 2, 3' should return an error") + } + + // TODO: should indicate via an error code that it is an invalid arg? + +} + +// TestNotEnoughQueryArgs tests to make sure the library correctly handles the application level bug +// whereby not enough query arguments are passed to a query +func TestNotEnoughQueryArgs(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.not_enough_query_args (id int, cluster int, value int, primary key (id, cluster))`); err != nil { + t.Fatal("create table:", err) + } + + _, err := session.Query(`SELECT * FROM not_enough_query_args WHERE id = ? and cluster = ?`, 1).Iter().SliceMap() + + if err == nil { + t.Fatal("'`SELECT * FROM not_enough_query_args WHERE id = ? 
and cluster = ?`, 1' should return an error") + } + + batch := session.NewBatch(UnloggedBatch) + batch.Query("INSERT INTO not_enough_query_args (id, cluster, value) VALUES (?, ?, ?)", 1, 2) + err = session.ExecuteBatch(batch) + + if err == nil { + t.Fatal("'`INSERT INTO not_enough_query_args (id, cluster, value) VALUES (?, ?, ?)`, 1, 2' should return an error") + } +} + +// TestCreateSessionTimeout tests to make sure the CreateSession function timeouts out correctly +// and prevents an infinite loop of connection retries. +func TestCreateSessionTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go func() { + select { + case <-time.After(2 * time.Second): + t.Error("no startup timeout") + case <-ctx.Done(): + } + }() + + cluster := createCluster() + cluster.Hosts = []string{"127.0.0.1:1"} + session, err := cluster.CreateSession() + if err == nil { + session.Close() + t.Fatal("expected ErrNoConnectionsStarted, but no error was returned.") + } +} + +func TestReconnection(t *testing.T) { + cluster := createCluster() + cluster.ReconnectInterval = 1 * time.Second + session := createSessionFromCluster(cluster, t) + defer session.Close() + + h := session.ring.allHosts()[0] + session.handleNodeDown(h.ConnectAddress(), h.Port()) + + if h.State() != NodeDown { + t.Fatal("Host should be NodeDown but not.") + } + + time.Sleep(cluster.ReconnectInterval + h.Version().nodeUpDelay() + 1*time.Second) + + if h.State() != NodeUp { + t.Fatal("Host should be NodeUp but not. Failed to reconnect.") + } +} + +type FullName struct { + FirstName string + LastName string +} + +func (n FullName) MarshalCQL(info TypeInfo) ([]byte, error) { + return []byte(n.FirstName + " " + n.LastName), nil +} + +func (n *FullName) UnmarshalCQL(info TypeInfo, data []byte) error { + t := strings.SplitN(string(data), " ", 2) + n.FirstName, n.LastName = t[0], t[1] + return nil +} + +func TestMapScanWithRefMap(t *testing.T) { + session := createSession(t) + defer session.Close() + if err := createTable(session, `CREATE TABLE gocql_test.scan_map_ref_table ( + testtext text PRIMARY KEY, + testfullname text, + testint int, + )`); err != nil { + t.Fatal("create table:", err) + } + m := make(map[string]interface{}) + m["testtext"] = "testtext" + m["testfullname"] = FullName{"John", "Doe"} + m["testint"] = 100 + + if err := session.Query(`INSERT INTO scan_map_ref_table (testtext, testfullname, testint) values (?,?,?)`, + m["testtext"], m["testfullname"], m["testint"]).Exec(); err != nil { + t.Fatal("insert:", err) + } + + var testText string + var testFullName FullName + ret := map[string]interface{}{ + "testtext": &testText, + "testfullname": &testFullName, + // testint is not set here. 
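+		// MapScan should still fill it in from the row as a plain value; the assertions below rely on that.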
+	}
+	iter := session.Query(`SELECT * FROM scan_map_ref_table`).Iter()
+	if ok := iter.MapScan(ret); !ok {
+		t.Fatal("select:", iter.Close())
+	} else {
+		if ret["testtext"] != "testtext" {
+			t.Fatal("returned testtext did not match")
+		}
+		f := ret["testfullname"].(FullName)
+		if f.FirstName != "John" || f.LastName != "Doe" {
+			t.Fatal("returned testfullname did not match")
+		}
+		if ret["testint"] != 100 {
+			t.Fatal("returned testint did not match")
+		}
+	}
+	if testText != "testtext" {
+		t.Fatal("returned testtext did not match")
+	}
+	if testFullName.FirstName != "John" || testFullName.LastName != "Doe" {
+		t.Fatal("returned testfullname did not match")
+	}
+
+	// using MapScan to read a nil int value
+	intp := new(int64)
+	ret = map[string]interface{}{
+		"testint": &intp,
+	}
+	if err := session.Query("INSERT INTO scan_map_ref_table(testtext, testint) VALUES(?, ?)", "null-int", nil).Exec(); err != nil {
+		t.Fatal(err)
+	}
+	err := session.Query(`SELECT testint FROM scan_map_ref_table WHERE testtext = ?`, "null-int").MapScan(ret)
+	if err != nil {
+		t.Fatal(err)
+	} else if v := ret["testint"].(*int64); v != nil {
+		t.Fatalf("testint should be nil got %+#v", v)
+	}
+
+}
+
+func TestMapScan(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+	if err := createTable(session, `CREATE TABLE gocql_test.scan_map_table (
+		fullname text PRIMARY KEY,
+		age int,
+		address inet,
+	)`); err != nil {
+		t.Fatal("create table:", err)
+	}
+
+	if err := session.Query(`INSERT INTO scan_map_table (fullname, age, address) values (?,?,?)`,
+		"Grace Hopper", 31, net.ParseIP("10.0.0.1")).Exec(); err != nil {
+		t.Fatal("insert:", err)
+	}
+	if err := session.Query(`INSERT INTO scan_map_table (fullname, age, address) values (?,?,?)`,
+		"Ada Lovelace", 30, net.ParseIP("10.0.0.2")).Exec(); err != nil {
+		t.Fatal("insert:", err)
+	}
+
+	iter := session.Query(`SELECT * FROM scan_map_table`).Iter()
+
+	// First iteration
+	row := make(map[string]interface{})
+	if !iter.MapScan(row) {
+		t.Fatal("select:", iter.Close())
+	}
+	assertEqual(t, "fullname", "Ada Lovelace", row["fullname"])
+	assertEqual(t, "age", 30, row["age"])
+	assertEqual(t, "address", "10.0.0.2", row["address"])
+
+	// Second iteration using a new map
+	row = make(map[string]interface{})
+	if !iter.MapScan(row) {
+		t.Fatal("select:", iter.Close())
+	}
+	assertEqual(t, "fullname", "Grace Hopper", row["fullname"])
+	assertEqual(t, "age", 31, row["age"])
+	assertEqual(t, "address", "10.0.0.1", row["address"])
+}
+
+func TestSliceMap(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+	if err := createTable(session, `CREATE TABLE gocql_test.slice_map_table (
+		testuuid timeuuid PRIMARY KEY,
+		testtimestamp timestamp,
+		testvarchar varchar,
+		testbigint bigint,
+		testblob blob,
+		testbool boolean,
+		testfloat float,
+		testdouble double,
+		testint int,
+		testdecimal decimal,
+		testlist list<text>,
+		testset set<int>,
+		testmap map<varchar, text>,
+		testvarint varint,
+		testinet inet
+	)`); err != nil {
+		t.Fatal("create table:", err)
+	}
+	m := make(map[string]interface{})
+
+	bigInt := new(big.Int)
+	if _, ok := bigInt.SetString("830169365738487321165427203929228", 10); !ok {
+		t.Fatal("Failed setting bigint by string")
+	}
+
+	m["testuuid"] = TimeUUID()
+	m["testvarchar"] = "Test VarChar"
+	m["testbigint"] = time.Now().Unix()
+	m["testtimestamp"] = time.Now().Truncate(time.Millisecond).UTC()
+	m["testblob"] = []byte("test blob")
+	m["testbool"] = true
+	m["testfloat"] = float32(4.564)
+	m["testdouble"] = float64(4.815162342)
+	m["testint"] = 
2343 + m["testdecimal"] = inf.NewDec(100, 0) + m["testlist"] = []string{"quux", "foo", "bar", "baz", "quux"} + m["testset"] = []int{1, 2, 3, 4, 5, 6, 7, 8, 9} + m["testmap"] = map[string]string{"field1": "val1", "field2": "val2", "field3": "val3"} + m["testvarint"] = bigInt + m["testinet"] = "213.212.2.19" + sliceMap := []map[string]interface{}{m} + if err := session.Query(`INSERT INTO slice_map_table (testuuid, testtimestamp, testvarchar, testbigint, testblob, testbool, testfloat, testdouble, testint, testdecimal, testlist, testset, testmap, testvarint, testinet) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + m["testuuid"], m["testtimestamp"], m["testvarchar"], m["testbigint"], m["testblob"], m["testbool"], m["testfloat"], m["testdouble"], m["testint"], m["testdecimal"], m["testlist"], m["testset"], m["testmap"], m["testvarint"], m["testinet"]).Exec(); err != nil { + t.Fatal("insert:", err) + } + if returned, retErr := session.Query(`SELECT * FROM slice_map_table`).Iter().SliceMap(); retErr != nil { + t.Fatal("select:", retErr) + } else { + matchSliceMap(t, sliceMap, returned[0]) + } + + // Test for Iter.MapScan() + { + testMap := make(map[string]interface{}) + if !session.Query(`SELECT * FROM slice_map_table`).Iter().MapScan(testMap) { + t.Fatal("MapScan failed to work with one row") + } + matchSliceMap(t, sliceMap, testMap) + } + + // Test for Query.MapScan() + { + testMap := make(map[string]interface{}) + if session.Query(`SELECT * FROM slice_map_table`).MapScan(testMap) != nil { + t.Fatal("MapScan failed to work with one row") + } + matchSliceMap(t, sliceMap, testMap) + } +} +func matchSliceMap(t *testing.T, sliceMap []map[string]interface{}, testMap map[string]interface{}) { + if sliceMap[0]["testuuid"] != testMap["testuuid"] { + t.Fatal("returned testuuid did not match") + } + if sliceMap[0]["testtimestamp"] != testMap["testtimestamp"] { + t.Fatal("returned testtimestamp did not match") + } + if sliceMap[0]["testvarchar"] != testMap["testvarchar"] { + t.Fatal("returned testvarchar did not match") + } + if sliceMap[0]["testbigint"] != testMap["testbigint"] { + t.Fatal("returned testbigint did not match") + } + if !reflect.DeepEqual(sliceMap[0]["testblob"], testMap["testblob"]) { + t.Fatal("returned testblob did not match") + } + if sliceMap[0]["testbool"] != testMap["testbool"] { + t.Fatal("returned testbool did not match") + } + if sliceMap[0]["testfloat"] != testMap["testfloat"] { + t.Fatal("returned testfloat did not match") + } + if sliceMap[0]["testdouble"] != testMap["testdouble"] { + t.Fatal("returned testdouble did not match") + } + if sliceMap[0]["testinet"] != testMap["testinet"] { + t.Fatal("returned testinet did not match") + } + + expectedDecimal := sliceMap[0]["testdecimal"].(*inf.Dec) + returnedDecimal := testMap["testdecimal"].(*inf.Dec) + + if expectedDecimal.Cmp(returnedDecimal) != 0 { + t.Fatal("returned testdecimal did not match") + } + + if !reflect.DeepEqual(sliceMap[0]["testlist"], testMap["testlist"]) { + t.Fatal("returned testlist did not match") + } + if !reflect.DeepEqual(sliceMap[0]["testset"], testMap["testset"]) { + t.Fatal("returned testset did not match") + } + if !reflect.DeepEqual(sliceMap[0]["testmap"], testMap["testmap"]) { + t.Fatal("returned testmap did not match") + } + if sliceMap[0]["testint"] != testMap["testint"] { + t.Fatal("returned testint did not match") + } +} + +func TestSmallInt(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion4 { + t.Skip("smallint is only 
supported in cassandra 2.2+") + } + + if err := createTable(session, `CREATE TABLE gocql_test.smallint_table ( + testsmallint smallint PRIMARY KEY, + )`); err != nil { + t.Fatal("create table:", err) + } + m := make(map[string]interface{}) + m["testsmallint"] = int16(2) + sliceMap := []map[string]interface{}{m} + if err := session.Query(`INSERT INTO smallint_table (testsmallint) VALUES (?)`, + m["testsmallint"]).Exec(); err != nil { + t.Fatal("insert:", err) + } + if returned, retErr := session.Query(`SELECT * FROM smallint_table`).Iter().SliceMap(); retErr != nil { + t.Fatal("select:", retErr) + } else { + if sliceMap[0]["testsmallint"] != returned[0]["testsmallint"] { + t.Fatal("returned testsmallint did not match") + } + } +} + +func TestScanWithNilArguments(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.scan_with_nil_arguments ( + foo varchar, + bar int, + PRIMARY KEY (foo, bar) + )`); err != nil { + t.Fatal("create:", err) + } + for i := 1; i <= 20; i++ { + if err := session.Query("INSERT INTO scan_with_nil_arguments (foo, bar) VALUES (?, ?)", + "squares", i*i).Exec(); err != nil { + t.Fatal("insert:", err) + } + } + + iter := session.Query("SELECT * FROM scan_with_nil_arguments WHERE foo = ?", "squares").Iter() + var n int + count := 0 + for iter.Scan(nil, &n) { + count += n + } + if err := iter.Close(); err != nil { + t.Fatal("close:", err) + } + if count != 2870 { + t.Fatalf("expected %d, got %d", 2870, count) + } +} + +func TestScanCASWithNilArguments(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("lightweight transactions not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, `CREATE TABLE gocql_test.scan_cas_with_nil_arguments ( + foo varchar, + bar varchar, + PRIMARY KEY (foo, bar) + )`); err != nil { + t.Fatal("create:", err) + } + + foo := "baz" + var cas string + + if applied, err := session.Query(`INSERT INTO scan_cas_with_nil_arguments (foo, bar) + VALUES (?, ?) IF NOT EXISTS`, + foo, foo).ScanCAS(nil, nil); err != nil { + t.Fatal("insert:", err) + } else if !applied { + t.Fatal("insert should have been applied") + } + + if applied, err := session.Query(`INSERT INTO scan_cas_with_nil_arguments (foo, bar) + VALUES (?, ?) IF NOT EXISTS`, + foo, foo).ScanCAS(&cas, nil); err != nil { + t.Fatal("insert:", err) + } else if applied { + t.Fatal("insert should not have been applied") + } else if foo != cas { + t.Fatalf("expected %v but got %v", foo, cas) + } + + if applied, err := session.Query(`INSERT INTO scan_cas_with_nil_arguments (foo, bar) + VALUES (?, ?) 
IF NOT EXISTS`,
+		foo, foo).ScanCAS(nil, &cas); err != nil {
+		t.Fatal("insert:", err)
+	} else if applied {
+		t.Fatal("insert should not have been applied")
+	} else if foo != cas {
+		t.Fatalf("expected %v but got %v", foo, cas)
+	}
+}
+
+func TestRebindQueryInfo(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.rebind_query (id int, value text, PRIMARY KEY (id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	if err := session.Query("INSERT INTO rebind_query (id, value) VALUES (?, ?)", 23, "quux").Exec(); err != nil {
+		t.Fatalf("insert into rebind_query failed, err '%v'", err)
+	}
+
+	if err := session.Query("INSERT INTO rebind_query (id, value) VALUES (?, ?)", 24, "w00t").Exec(); err != nil {
+		t.Fatalf("insert into rebind_query failed, err '%v'", err)
+	}
+
+	q := session.Query("SELECT value FROM rebind_query WHERE ID = ?")
+	q.Bind(23)
+
+	iter := q.Iter()
+	var value string
+	for iter.Scan(&value) {
+	}
+
+	if value != "quux" {
+		t.Fatalf("expected %v but got %v", "quux", value)
+	}
+
+	q.Bind(24)
+	iter = q.Iter()
+
+	for iter.Scan(&value) {
+	}
+
+	if value != "w00t" {
+		t.Fatalf("expected %v but got %v", "w00t", value)
+	}
+}
+
+//TestStaticQueryInfo makes sure that the application can manually bind query parameters using the simplest possible static binding strategy
+func TestStaticQueryInfo(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.static_query_info (id int, value text, PRIMARY KEY (id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	if err := session.Query("INSERT INTO static_query_info (id, value) VALUES (?, ?)", 113, "foo").Exec(); err != nil {
+		t.Fatalf("insert into static_query_info failed, err '%v'", err)
+	}
+
+	autobinder := func(q *QueryInfo) ([]interface{}, error) {
+		values := make([]interface{}, 1)
+		values[0] = 113
+		return values, nil
+	}
+
+	qry := session.Bind("SELECT id, value FROM static_query_info WHERE id = ?", autobinder)
+
+	if err := qry.Exec(); err != nil {
+		t.Fatalf("expose query info failed, error '%v'", err)
+	}
+
+	iter := qry.Iter()
+
+	var id int
+	var value string
+
+	iter.Scan(&id, &value)
+
+	if err := iter.Close(); err != nil {
+		t.Fatalf("query with exposed info failed, err '%v'", err)
+	}
+
+	if value != "foo" {
+		t.Fatalf("Expected value %s, but got %s", "foo", value)
+	}
+
+}
+
+type ClusteredKeyValue struct {
+	Id      int
+	Cluster int
+	Value   string
+}
+
+func (kv *ClusteredKeyValue) Bind(q *QueryInfo) ([]interface{}, error) {
+	values := make([]interface{}, len(q.Args))
+
+	for i, info := range q.Args {
+		fieldName := upcaseInitial(info.Name)
+		value := reflect.ValueOf(kv)
+		field := reflect.Indirect(value).FieldByName(fieldName)
+		values[i] = field.Addr().Interface()
+	}
+
+	return values, nil
+}
+
+func upcaseInitial(str string) string {
+	for i, v := range str {
+		return string(unicode.ToUpper(v)) + str[i+1:]
+	}
+	return ""
+}
+
+//TestBoundQueryInfo makes sure that the application can manually bind query parameters using the query meta data supplied at runtime
+func TestBoundQueryInfo(t *testing.T) {
+
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.clustered_query_info (id int, cluster int, value text, PRIMARY KEY (id, cluster))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	write := 
&ClusteredKeyValue{Id: 200, Cluster: 300, Value: "baz"} + + insert := session.Bind("INSERT INTO clustered_query_info (id, cluster, value) VALUES (?, ?,?)", write.Bind) + + if err := insert.Exec(); err != nil { + t.Fatalf("insert into clustered_query_info failed, err '%v'", err) + } + + read := &ClusteredKeyValue{Id: 200, Cluster: 300} + + qry := session.Bind("SELECT id, cluster, value FROM clustered_query_info WHERE id = ? and cluster = ?", read.Bind) + + iter := qry.Iter() + + var id, cluster int + var value string + + iter.Scan(&id, &cluster, &value) + + if err := iter.Close(); err != nil { + t.Fatalf("query with clustered_query_info info failed, err '%v'", err) + } + + if value != "baz" { + t.Fatalf("Expected value %s, but got %s", "baz", value) + } + +} + +//TestBatchQueryInfo makes sure that the application can manually bind query parameters when executing in a batch +func TestBatchQueryInfo(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported. Please use Cassandra >= 2.0") + } + + if err := createTable(session, "CREATE TABLE gocql_test.batch_query_info (id int, cluster int, value text, PRIMARY KEY (id, cluster))"); err != nil { + t.Fatalf("failed to create table with error '%v'", err) + } + + write := func(q *QueryInfo) ([]interface{}, error) { + values := make([]interface{}, 3) + values[0] = 4000 + values[1] = 5000 + values[2] = "bar" + return values, nil + } + + batch := session.NewBatch(LoggedBatch) + batch.Bind("INSERT INTO batch_query_info (id, cluster, value) VALUES (?, ?,?)", write) + + if err := session.ExecuteBatch(batch); err != nil { + t.Fatalf("batch insert into batch_query_info failed, err '%v'", err) + } + + read := func(q *QueryInfo) ([]interface{}, error) { + values := make([]interface{}, 2) + values[0] = 4000 + values[1] = 5000 + return values, nil + } + + qry := session.Bind("SELECT id, cluster, value FROM batch_query_info WHERE id = ? 
and cluster = ?", read) + + iter := qry.Iter() + + var id, cluster int + var value string + + iter.Scan(&id, &cluster, &value) + + if err := iter.Close(); err != nil { + t.Fatalf("query with batch_query_info info failed, err '%v'", err) + } + + if value != "bar" { + t.Fatalf("Expected value %s, but got %s", "bar", value) + } +} + +func getRandomConn(t *testing.T, session *Session) *Conn { + conn := session.getConn() + if conn == nil { + t.Fatal("unable to get a connection") + } + return conn +} + +func injectInvalidPreparedStatement(t *testing.T, session *Session, table string) (string, *Conn) { + if err := createTable(session, `CREATE TABLE gocql_test.`+table+` ( + foo varchar, + bar int, + PRIMARY KEY (foo, bar) + )`); err != nil { + t.Fatal("create:", err) + } + + stmt := "INSERT INTO " + table + " (foo, bar) VALUES (?, 7)" + + conn := getRandomConn(t, session) + + flight := new(inflightPrepare) + key := session.stmtsLRU.keyFor(conn.addr, "", stmt) + session.stmtsLRU.add(key, flight) + + flight.preparedStatment = &preparedStatment{ + id: []byte{'f', 'o', 'o', 'b', 'a', 'r'}, + request: preparedMetadata{ + resultMetadata: resultMetadata{ + colCount: 1, + actualColCount: 1, + columns: []ColumnInfo{ + { + Keyspace: "gocql_test", + Table: table, + Name: "foo", + TypeInfo: NativeType{ + typ: TypeVarchar, + }, + }, + }, + }, + }, + } + + return stmt, conn +} + +func TestPrepare_MissingSchemaPrepare(t *testing.T) { + s := createSession(t) + conn := getRandomConn(t, s) + defer s.Close() + + insertQry := &Query{stmt: "INSERT INTO invalidschemaprep (val) VALUES (?)", values: []interface{}{5}, cons: s.cons, + session: s, pageSize: s.pageSize, trace: s.trace, + prefetch: s.prefetch, rt: s.cfg.RetryPolicy} + + if err := conn.executeQuery(insertQry).err; err == nil { + t.Fatal("expected error, but got nil.") + } + + if err := createTable(s, "CREATE TABLE gocql_test.invalidschemaprep (val int, PRIMARY KEY (val))"); err != nil { + t.Fatal("create table:", err) + } + + if err := conn.executeQuery(insertQry).err; err != nil { + t.Fatal(err) // unconfigured columnfamily + } +} + +func TestPrepare_ReprepareStatement(t *testing.T) { + session := createSession(t) + defer session.Close() + stmt, conn := injectInvalidPreparedStatement(t, session, "test_reprepare_statement") + query := session.Query(stmt, "bar") + if err := conn.executeQuery(query).Close(); err != nil { + t.Fatalf("Failed to execute query for reprepare statement: %v", err) + } +} + +func TestPrepare_ReprepareBatch(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion == 1 { + t.Skip("atomic batches not supported. 
Please use Cassandra >= 2.0")
+	}
+
+	stmt, conn := injectInvalidPreparedStatement(t, session, "test_reprepare_statement_batch")
+	batch := session.NewBatch(UnloggedBatch)
+	batch.Query(stmt, "bar")
+	if err := conn.executeBatch(batch).Close(); err != nil {
+		t.Fatalf("Failed to execute query for reprepare statement: %v", err)
+	}
+}
+
+func TestQueryInfo(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	conn := getRandomConn(t, session)
+	info, err := conn.prepareStatement(context.Background(), "SELECT release_version, host_id FROM system.local WHERE key = ?", nil)
+
+	if err != nil {
+		t.Fatalf("Failed to execute query for preparing statement: %v", err)
+	}
+
+	if x := len(info.request.columns); x != 1 {
+		t.Fatalf("expected meta data for %d query argument, but got %d\n", 1, x)
+	}
+
+	if session.cfg.ProtoVersion > 1 {
+		if x := len(info.response.columns); x != 2 {
+			t.Fatalf("expected meta data for %d result columns, but got %d\n", 2, x)
+		}
+	}
+}
+
+//TestPrepare_PreparedCacheEviction will make sure that the cache size is maintained
+func TestPrepare_PreparedCacheEviction(t *testing.T) {
+	const maxPrepared = 4
+
+	host := clusterHosts[0]
+	cluster := createCluster()
+	cluster.MaxPreparedStmts = maxPrepared
+	cluster.Events.DisableSchemaEvents = true
+	cluster.Hosts = []string{host}
+
+	cluster.HostFilter = WhiteListHostFilter(host)
+
+	session := createSessionFromCluster(cluster, t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.prepcachetest (id int,mod int,PRIMARY KEY (id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+	// clear the cache
+	session.stmtsLRU.clear()
+
+	//Fill the table
+	for i := 0; i < 2; i++ {
+		if err := session.Query("INSERT INTO prepcachetest (id,mod) VALUES (?, ?)", i, 10000%(i+1)).Exec(); err != nil {
+			t.Fatalf("insert into prepcachetest failed, err '%v'", err)
+		}
+	}
+	//Populate the prepared statement cache with select statements
+	var id, mod int
+	for i := 0; i < 2; i++ {
+		err := session.Query("SELECT id,mod FROM prepcachetest WHERE id = "+strconv.FormatInt(int64(i), 10)).Scan(&id, &mod)
+		if err != nil {
+			t.Fatalf("select from prepcachetest failed, error '%v'", err)
+		}
+	}
+
+	//generate an update statement to test they are prepared
+	err := session.Query("UPDATE prepcachetest SET mod = ? WHERE id = ?", 1, 11).Exec()
+	if err != nil {
+		t.Fatalf("update prepcachetest failed, error '%v'", err)
+	}
+
+	//generate a delete statement to test they are prepared
+	err = session.Query("DELETE FROM prepcachetest WHERE id = ?", 1).Exec()
+	if err != nil {
+		t.Fatalf("delete from prepcachetest failed, error '%v'", err)
+	}
+
+	//generate an insert statement to test they are prepared
+	err = session.Query("INSERT INTO prepcachetest (id,mod) VALUES (?, ?)", 3, 11).Exec()
+	if err != nil {
+		t.Fatalf("insert into prepcachetest failed, error '%v'", err)
+	}
+
+	session.stmtsLRU.mu.Lock()
+	defer session.stmtsLRU.mu.Unlock()
+
+	//Make sure the cache size is maintained
+	if session.stmtsLRU.lru.Len() != session.stmtsLRU.lru.MaxEntries {
+		t.Fatalf("expected cache size of %v, got %v", session.stmtsLRU.lru.MaxEntries, session.stmtsLRU.lru.Len())
+	}
+
+	// Walk through all the configured hosts and test cache retention and eviction
+	for _, host := range session.cfg.Hosts {
+		_, ok := session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host+":9042", session.cfg.Keyspace, "SELECT id,mod FROM prepcachetest WHERE id = 0"))
+		if ok {
+			t.Errorf("expected first select to be purged but was in cache for host=%q", host)
+		}
+
+		_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host+":9042", session.cfg.Keyspace, "SELECT id,mod FROM prepcachetest WHERE id = 1"))
+		if !ok {
+			t.Errorf("expected second select to be in cache for host=%q", host)
+		}
+
+		_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host+":9042", session.cfg.Keyspace, "INSERT INTO prepcachetest (id,mod) VALUES (?, ?)"))
+		if !ok {
+			t.Errorf("expected insert to be in cache for host=%q", host)
+		}
+
+		_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host+":9042", session.cfg.Keyspace, "UPDATE prepcachetest SET mod = ? WHERE id = ?"))
+		if !ok {
+			t.Errorf("expected update to be in cache for host=%q", host)
+		}
+
+		_, ok = session.stmtsLRU.lru.Get(session.stmtsLRU.keyFor(host+":9042", session.cfg.Keyspace, "DELETE FROM prepcachetest WHERE id = ?"))
+		if !ok {
+			t.Errorf("expected delete to be in cache for host=%q", host)
+		}
+	}
+}
+
+func TestPrepare_PreparedCacheKey(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	// create a second keyspace
+	cluster2 := createCluster()
+	createKeyspace(t, cluster2, "gocql_test2")
+	cluster2.Keyspace = "gocql_test2"
+	session2, err := cluster2.CreateSession()
+	if err != nil {
+		t.Fatal("create session:", err)
+	}
+	defer session2.Close()
+
+	// both keyspaces have a table named "test_stmt_cache_key"
+	if err := createTable(session, "CREATE TABLE gocql_test.test_stmt_cache_key (id varchar primary key, field varchar)"); err != nil {
+		t.Fatal("create table:", err)
+	}
+	if err := createTable(session2, "CREATE TABLE gocql_test2.test_stmt_cache_key (id varchar primary key, field varchar)"); err != nil {
+		t.Fatal("create table:", err)
+	}
+
+	// both tables have a single row with the same partition key but different column value
+	if err = session.Query(`INSERT INTO test_stmt_cache_key (id, field) VALUES (?, ?)`, "key", "one").Exec(); err != nil {
+		t.Fatal("insert:", err)
+	}
+	if err = session2.Query(`INSERT INTO test_stmt_cache_key (id, field) VALUES (?, ?)`, "key", "two").Exec(); err != nil {
+		t.Fatal("insert:", err)
+	}
+
+	// should be able to see different values in each keyspace
+	var value string
+	if err = session.Query("SELECT field FROM test_stmt_cache_key WHERE id = ?", "key").Scan(&value); err != nil {
+		t.Fatal("select:", err)
+	}
+	if value != "one" {
+		t.Errorf("Expected one, got %s", value)
+	}
+
+	if err = session2.Query("SELECT field FROM test_stmt_cache_key WHERE id = ?", "key").Scan(&value); err != nil {
+		t.Fatal("select:", err)
+	}
+	if value != "two" {
+		t.Errorf("Expected two, got %s", value)
+	}
+}
+
+//TestMarshalFloat64Ptr tests to see that a pointer to a float64 is marshalled correctly.
+func TestMarshalFloat64Ptr(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.float_test (id double, test double, primary key (id))"); err != nil {
+		t.Fatal("create table:", err)
+	}
+	testNum := float64(7500)
+	if err := session.Query(`INSERT INTO float_test (id,test) VALUES (?,?)`, float64(7500.00), &testNum).Exec(); err != nil {
+		t.Fatal("insert float64:", err)
+	}
+}
+
+//TestMarshalInet tests to see that inet values are marshalled and unmarshalled correctly.
+func TestMarshalInet(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, "CREATE TABLE gocql_test.inet_test (ip inet, name text, primary key (ip))"); err != nil { + t.Fatal("create table:", err) + } + stringIp := "123.34.45.56" + if err := session.Query(`INSERT INTO inet_test (ip,name) VALUES (?,?)`, stringIp, "Test IP 1").Exec(); err != nil { + t.Fatal("insert string inet:", err) + } + var stringResult string + if err := session.Query("SELECT ip FROM inet_test").Scan(&stringResult); err != nil { + t.Fatalf("select for string from inet_test 1 failed: %v", err) + } + if stringResult != stringIp { + t.Errorf("Expected %s, was %s", stringIp, stringResult) + } + + var ipResult net.IP + if err := session.Query("SELECT ip FROM inet_test").Scan(&ipResult); err != nil { + t.Fatalf("select for net.IP from inet_test 1 failed: %v", err) + } + if ipResult.String() != stringIp { + t.Errorf("Expected %s, was %s", stringIp, ipResult.String()) + } + + if err := session.Query(`DELETE FROM inet_test WHERE ip = ?`, stringIp).Exec(); err != nil { + t.Fatal("delete inet table:", err) + } + + netIp := net.ParseIP("222.43.54.65") + if err := session.Query(`INSERT INTO inet_test (ip,name) VALUES (?,?)`, netIp, "Test IP 2").Exec(); err != nil { + t.Fatal("insert netIp inet:", err) + } + + if err := session.Query("SELECT ip FROM inet_test").Scan(&stringResult); err != nil { + t.Fatalf("select for string from inet_test 2 failed: %v", err) + } + if stringResult != netIp.String() { + t.Errorf("Expected %s, was %s", netIp.String(), stringResult) + } + if err := session.Query("SELECT ip FROM inet_test").Scan(&ipResult); err != nil { + t.Fatalf("select for net.IP from inet_test 2 failed: %v", err) + } + if ipResult.String() != netIp.String() { + t.Errorf("Expected %s, was %s", netIp.String(), ipResult.String()) + } + +} + +func TestVarint(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, "CREATE TABLE gocql_test.varint_test (id varchar, test varint, test2 varint, primary key (id))"); err != nil { + t.Fatalf("failed to create table with error '%v'", err) + } + + if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", 0).Exec(); err != nil { + t.Fatalf("insert varint: %v", err) + } + + var result int + if err := session.Query("SELECT test FROM varint_test").Scan(&result); err != nil { + t.Fatalf("select from varint_test failed: %v", err) + } + + if result != 0 { + t.Errorf("Expected 0, was %d", result) + } + + if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", -1).Exec(); err != nil { + t.Fatalf("insert varint: %v", err) + } + + if err := session.Query("SELECT test FROM varint_test").Scan(&result); err != nil { + t.Fatalf("select from varint_test failed: %v", err) + } + + if result != -1 { + t.Errorf("Expected -1, was %d", result) + } + + if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", nil).Exec(); err != nil { + t.Fatalf("insert varint: %v", err) + } + + if err := session.Query("SELECT test FROM varint_test").Scan(&result); err != nil { + t.Fatalf("select from varint_test failed: %v", err) + } + + if result != 0 { + t.Errorf("Expected 0, was %d", result) + } + + var nullableResult *int + + if err := session.Query("SELECT test FROM varint_test").Scan(&nullableResult); err != nil { + t.Fatalf("select from varint_test failed: %v", err) + } + + if nullableResult != nil { + t.Errorf("Expected nil, was %d", nullableResult) + } + + 
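+	// Reading varint values wider than 32 bits requires an int64 or big.Int destination; both are exercised below.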
+	if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", int64(math.MaxInt32)+1).Exec(); err != nil {
+		t.Fatalf("insert varint: %v", err)
+	}
+
+	var result64 int64
+	if err := session.Query("SELECT test FROM varint_test").Scan(&result64); err != nil {
+		t.Fatalf("select from varint_test failed: %v", err)
+	}
+
+	if result64 != int64(math.MaxInt32)+1 {
+		t.Errorf("Expected %d, was %d", int64(math.MaxInt32)+1, result64)
+	}
+
+	biggie := new(big.Int)
+	biggie.SetString("36893488147419103232", 10) // > 2**64
+	if err := session.Query(`INSERT INTO varint_test (id, test) VALUES (?, ?)`, "id", biggie).Exec(); err != nil {
+		t.Fatalf("insert varint: %v", err)
+	}
+
+	resultBig := new(big.Int)
+	if err := session.Query("SELECT test FROM varint_test").Scan(resultBig); err != nil {
+		t.Fatalf("select from varint_test failed: %v", err)
+	}
+
+	if resultBig.String() != biggie.String() {
+		t.Errorf("Expected %s, was %s", biggie.String(), resultBig.String())
+	}
+
+	err := session.Query("SELECT test FROM varint_test").Scan(&result64)
+	if err == nil || !strings.Contains(err.Error(), "out of range") {
+		t.Errorf("expected out of range error since value is too big for int64")
+	}
+
+	// value not set in cassandra, leave bind variable empty
+	resultBig = new(big.Int)
+	if err := session.Query("SELECT test2 FROM varint_test").Scan(resultBig); err != nil {
+		t.Fatalf("select from varint_test failed: %v", err)
+	}
+
+	if resultBig.Int64() != 0 {
+		t.Errorf("Expected 0, was %s", resultBig.String())
+	}
+
+	// can use double pointer to explicitly detect value is not set in cassandra
+	if err := session.Query("SELECT test2 FROM varint_test").Scan(&resultBig); err != nil {
+		t.Fatalf("select from varint_test failed: %v", err)
+	}
+
+	if resultBig != nil {
+		t.Errorf("Expected %v, was %v", nil, *resultBig)
+	}
+}
+
+//TestQueryStats confirms that the stats are returning valid data. Accuracy may be questionable.
+func TestQueryStats(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+	qry := session.Query("SELECT * FROM system.peers")
+	if err := qry.Exec(); err != nil {
+		t.Fatalf("query failed. %v", err)
+	} else {
+		if qry.Attempts() < 1 {
+			t.Fatal("expected at least 1 attempt, but got 0")
+		}
+		if qry.Latency() <= 0 {
+			t.Fatalf("expected latency to be greater than 0, but got %v instead.", qry.Latency())
+		}
+	}
+}
+
+// TestIterHost confirms that host is added to Iter when the query succeeds.
+func TestIterHost(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+	iter := session.Query("SELECT * FROM system.peers").Iter()
+
+	// check if Host method works
+	if iter.Host() == nil {
+		t.Error("No host in iter")
+	}
+}
+
+//TestBatchStats confirms that the stats are returning valid data. Accuracy may be questionable.
+func TestBatchStats(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if session.cfg.ProtoVersion == 1 {
+		t.Skip("atomic batches not supported. Please use Cassandra >= 2.0")
+	}
+
+	if err := createTable(session, "CREATE TABLE gocql_test.batchStats (id int, PRIMARY KEY (id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	b := session.NewBatch(LoggedBatch)
+	b.Query("INSERT INTO batchStats (id) VALUES (?)", 1)
+	b.Query("INSERT INTO batchStats (id) VALUES (?)", 2)
+
+	if err := session.ExecuteBatch(b); err != nil {
+		t.Fatalf("query failed. %v", err)
+	} else {
+		if b.Attempts() < 1 {
+			t.Fatal("expected at least 1 attempt, but got 0")
+		}
+		if b.Latency() <= 0 {
+			t.Fatalf("expected latency to be greater than 0, but got %v instead.", b.Latency())
+		}
+	}
+}
+
+//TestNilInQuery tests to see that a nil value passed to a query is handled by Cassandra
+//TODO validate the nil value by reading back the nil. Need to fix Unmarshalling.
+func TestNilInQuery(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.testNilInsert (id int, count int, PRIMARY KEY (id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+	if err := session.Query("INSERT INTO testNilInsert (id,count) VALUES (?,?)", 1, nil).Exec(); err != nil {
+		t.Fatalf("failed to insert with err: %v", err)
+	}
+
+	var id int
+
+	if err := session.Query("SELECT id FROM testNilInsert").Scan(&id); err != nil {
+		t.Fatalf("failed to select with err: %v", err)
+	} else if id != 1 {
+		t.Fatalf("expected id to be 1, got %v", id)
+	}
+}
+
+// Don't initialize time.Time bind variable if cassandra timestamp column is empty
+func TestEmptyTimestamp(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.test_empty_timestamp (id int, time timestamp, num int, PRIMARY KEY (id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	if err := session.Query("INSERT INTO test_empty_timestamp (id, num) VALUES (?,?)", 1, 561).Exec(); err != nil {
+		t.Fatalf("failed to insert with err: %v", err)
+	}
+
+	var timeVal time.Time
+
+	if err := session.Query("SELECT time FROM test_empty_timestamp where id = ?", 1).Scan(&timeVal); err != nil {
+		t.Fatalf("failed to select with err: %v", err)
+	}
+
+	if !timeVal.IsZero() {
+		t.Errorf("time.Time bind variable should still be empty (was %s)", timeVal)
+	}
+}
+
+// Integration test of just querying for data from the system.schema_keyspace table where the keyspace DOES exist.
+func TestGetKeyspaceMetadata(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	keyspaceMetadata, err := getKeyspaceMetadata(session, "gocql_test")
+	if err != nil {
+		t.Fatalf("failed to query the keyspace metadata with err: %v", err)
+	}
+	if keyspaceMetadata == nil {
+		t.Fatal("failed to query the keyspace metadata, nil returned")
+	}
+	if keyspaceMetadata.Name != "gocql_test" {
+		t.Errorf("Expected keyspace name to be 'gocql_test' but was '%s'", keyspaceMetadata.Name)
+	}
+	if keyspaceMetadata.StrategyClass != "org.apache.cassandra.locator.SimpleStrategy" {
+		t.Errorf("Expected replication strategy class to be 'org.apache.cassandra.locator.SimpleStrategy' but was '%s'", keyspaceMetadata.StrategyClass)
+	}
+	if keyspaceMetadata.StrategyOptions == nil {
+		t.Error("Expected replication strategy options map but was nil")
+	}
+	rfStr, ok := keyspaceMetadata.StrategyOptions["replication_factor"]
+	if !ok {
+		t.Fatalf("Expected strategy option 'replication_factor' but was not found in %v", keyspaceMetadata.StrategyOptions)
+	}
+	rfInt, err := strconv.Atoi(rfStr.(string))
+	if err != nil {
+		t.Fatalf("Error converting string to int with err: %v", err)
+	}
+	if rfInt != *flagRF {
+		t.Errorf("Expected replication factor to be %d but was %d", *flagRF, rfInt)
+	}
+}
+
+// Integration test of just querying for data from the system.schema_keyspace table where the keyspace DOES NOT exist.
+func TestGetKeyspaceMetadataFails(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	_, err := getKeyspaceMetadata(session, "gocql_keyspace_does_not_exist")
+
+	if err != ErrKeyspaceDoesNotExist {
+		t.Fatalf("Expected error of type ErrKeyspaceDoesNotExist. Instead, error was %v", err)
+	}
+}
+
+// Integration test of just querying for data from the system.schema_columnfamilies table
+func TestGetTableMetadata(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.test_table_metadata (first_id int, second_id int, third_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	tables, err := getTableMetadata(session, "gocql_test")
+	if err != nil {
+		t.Fatalf("failed to query the table metadata with err: %v", err)
+	}
+	if tables == nil {
+		t.Fatal("failed to query the table metadata, nil returned")
+	}
+
+	var testTable *TableMetadata
+
+	// verify all tables have minimum expected data
+	for i := range tables {
+		table := &tables[i]
+
+		if table.Name == "" {
+			t.Errorf("Expected table name to be set, but it was empty: index=%d metadata=%+v", i, table)
+		}
+		if table.Keyspace != "gocql_test" {
+			t.Errorf("Expected keyspace for '%s' table metadata to be 'gocql_test' but was '%s'", table.Name, table.Keyspace)
+		}
+		if session.cfg.ProtoVersion < 4 {
+			// TODO(zariel): there has to be a better way to detect what metadata version
+			// we are in, and a better way to structure the code so that it is abstracted away
+			// from us here
+			if table.KeyValidator == "" {
+				t.Errorf("Expected key validator to be set for table %s", table.Name)
+			}
+			if table.Comparator == "" {
+				t.Errorf("Expected comparator to be set for table %s", table.Name)
+			}
+			if table.DefaultValidator == "" {
+				t.Errorf("Expected default validator to be set for table %s", table.Name)
+			}
+		}
+
+		// these fields are not set until the metadata is compiled
+		if table.PartitionKey != nil {
+			t.Errorf("Did not expect partition key for table %s", table.Name)
+		}
+		if table.ClusteringColumns != nil {
+			t.Errorf("Did not expect clustering columns for table %s", table.Name)
+		}
+		if table.Columns != nil {
+			t.Errorf("Did not expect columns for table %s", table.Name)
+		}
+
+		// for the next part of the test after this loop, find the metadata for the test table
+		if table.Name == "test_table_metadata" {
+			testTable = table
+		}
+	}
+
+	// verify actual values on the test tables
+	if testTable == nil {
+		t.Fatal("Expected table metadata for name 'test_table_metadata'")
+	}
+	if session.cfg.ProtoVersion == protoVersion1 {
+		if testTable.KeyValidator != "org.apache.cassandra.db.marshal.Int32Type" {
+			t.Errorf("Expected test_table_metadata key validator to be 'org.apache.cassandra.db.marshal.Int32Type' but was '%s'", testTable.KeyValidator)
+		}
+		if testTable.Comparator != "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type)" {
+			t.Errorf("Expected test_table_metadata comparator to be 'org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.UTF8Type)' but was '%s'", testTable.Comparator)
+		}
+		if testTable.DefaultValidator != "org.apache.cassandra.db.marshal.BytesType" {
+			t.Errorf("Expected test_table_metadata default validator to be 'org.apache.cassandra.db.marshal.BytesType' but was '%s'", testTable.DefaultValidator)
+		}
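+		// KeyAliases/ColumnAliases are only asserted in this proto v1 branch, since they come from the legacy pre-3.0 schema tables.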
+		expectedKeyAliases := []string{"first_id"}
+		if !reflect.DeepEqual(testTable.KeyAliases, expectedKeyAliases) {
+			t.Errorf("Expected key aliases %v but was %v", expectedKeyAliases, testTable.KeyAliases)
+		}
+		expectedColumnAliases := []string{"second_id"}
+		if !reflect.DeepEqual(testTable.ColumnAliases, expectedColumnAliases) {
+			t.Errorf("Expected column aliases %v but was %v", expectedColumnAliases, testTable.ColumnAliases)
+		}
+	}
+	if testTable.ValueAlias != "" {
+		t.Errorf("Expected value alias '' but was '%s'", testTable.ValueAlias)
+	}
+}
+
+// Integration test of just querying for data from the system.schema_columns table
+func TestGetColumnMetadata(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.test_column_metadata (first_id int, second_id int, third_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	if err := session.Query("CREATE INDEX index_column_metadata ON test_column_metadata ( third_id )").Exec(); err != nil {
+		t.Fatalf("failed to create index with err: %v", err)
+	}
+
+	columns, err := getColumnMetadata(session, "gocql_test")
+	if err != nil {
+		t.Fatalf("failed to query column metadata with err: %v", err)
+	}
+	if columns == nil {
+		t.Fatal("failed to query column metadata, nil returned")
+	}
+
+	testColumns := map[string]*ColumnMetadata{}
+
+	// verify actual values on the test columns
+	for i := range columns {
+		column := &columns[i]
+
+		if column.Name == "" {
+			t.Errorf("Expected column name to be set, but it was empty: index=%d metadata=%+v", i, column)
+		}
+		if column.Table == "" {
+			t.Errorf("Expected column %s table name to be set, but it was empty", column.Name)
+		}
+		if column.Keyspace != "gocql_test" {
+			t.Errorf("Expected column %s keyspace name to be 'gocql_test', but it was '%s'", column.Name, column.Keyspace)
+		}
+		if column.Kind == ColumnUnkownKind {
+			t.Errorf("Expected column %s kind to be set, but it was empty", column.Name)
+		}
+		if session.cfg.ProtoVersion == 1 && column.Kind != ColumnRegular {
+			t.Errorf("Expected column %s kind to be set to 'regular' for proto V1 but it was '%s'", column.Name, column.Kind)
+		}
+		if column.Validator == "" {
+			t.Errorf("Expected column %s validator to be set, but it was empty", column.Name)
+		}
+
+		// find the test table columns for the next step after this loop
+		if column.Table == "test_column_metadata" {
+			testColumns[column.Name] = column
+		}
+	}
+
+	if session.cfg.ProtoVersion == 1 {
+		// V1 proto only returns "regular columns"
+		if len(testColumns) != 1 {
+			t.Errorf("Expected 1 test column but there were %d", len(testColumns))
+		}
+		thirdID, found := testColumns["third_id"]
+		if !found {
+			t.Fatalf("Expected to find column 'third_id' metadata but there was only %v", testColumns)
+		}
+
+		if thirdID.Kind != ColumnRegular {
+			t.Errorf("Expected %s column kind to be '%s' but it was '%s'", thirdID.Name, ColumnRegular, thirdID.Kind)
+		}
+
+		if thirdID.Index.Name != "index_column_metadata" {
+			t.Errorf("Expected %s column index name to be 'index_column_metadata' but it was '%s'", thirdID.Name, thirdID.Index.Name)
+		}
+	} else {
+		if len(testColumns) != 3 {
+			t.Errorf("Expected 3 test columns but there were %d", len(testColumns))
+		}
+		firstID, found := testColumns["first_id"]
+		if !found {
+			t.Fatalf("Expected to find column 'first_id' metadata but there was only %v", testColumns)
+		}
+		secondID, found := testColumns["second_id"]
+		if !found {
+			t.Fatalf("Expected to find column 'second_id' metadata but there was only %v", testColumns)
+		}
+		thirdID, found := testColumns["third_id"]
+		if !found {
+			t.Fatalf("Expected to find column 'third_id' metadata but there was only %v", testColumns)
+		}
+
+		if firstID.Kind != ColumnPartitionKey {
+			t.Errorf("Expected %s column kind to be '%s' but it was '%s'", firstID.Name, ColumnPartitionKey, firstID.Kind)
+		}
+		if secondID.Kind != ColumnClusteringKey {
+			t.Errorf("Expected %s column kind to be '%s' but it was '%s'", secondID.Name, ColumnClusteringKey, secondID.Kind)
+		}
+		if thirdID.Kind != ColumnRegular {
+			t.Errorf("Expected %s column kind to be '%s' but it was '%s'", thirdID.Name, ColumnRegular, thirdID.Kind)
+		}
+
+		if !session.useSystemSchema && thirdID.Index.Name != "index_column_metadata" {
+			// TODO(zariel): update metadata to scan index from system_schema
+			t.Errorf("Expected %s column index name to be 'index_column_metadata' but it was '%s'", thirdID.Name, thirdID.Index.Name)
+		}
+	}
+}
+
+// Integration test of querying and composing the keyspace metadata
+func TestKeyspaceMetadata(t *testing.T) {
+	session := createSession(t)
+	defer session.Close()
+
+	if err := createTable(session, "CREATE TABLE gocql_test.test_metadata (first_id int, second_id int, third_id int, PRIMARY KEY (first_id, second_id))"); err != nil {
+		t.Fatalf("failed to create table with error '%v'", err)
+	}
+
+	if err := session.Query("CREATE INDEX index_metadata ON test_metadata ( third_id )").Exec(); err != nil {
+		t.Fatalf("failed to create index with err: %v", err)
+	}
+
+	keyspaceMetadata, err := session.KeyspaceMetadata("gocql_test")
+	if err != nil {
+		t.Fatalf("failed to query keyspace metadata with err: %v", err)
+	}
+	if keyspaceMetadata == nil {
+		t.Fatal("expected the keyspace metadata to not be nil, but it was nil")
+	}
+	if keyspaceMetadata.Name != session.cfg.Keyspace {
+		t.Fatalf("Expected the keyspace name to be %s but was %s", session.cfg.Keyspace, keyspaceMetadata.Name)
+	}
+	if len(keyspaceMetadata.Tables) == 0 {
+		t.Errorf("Expected tables but there were none")
+	}
+
+	tableMetadata, found := keyspaceMetadata.Tables["test_metadata"]
+	if !found {
+		t.Fatalf("failed to find the test_metadata table metadata")
+	}
+
+	if len(tableMetadata.PartitionKey) != 1 {
+		t.Errorf("expected partition key length of 1, but was %d", len(tableMetadata.PartitionKey))
+	}
+	for i, column := range tableMetadata.PartitionKey {
+		if column == nil {
+			t.Errorf("partition key column metadata at index %d was nil", i)
+		}
+	}
+	if tableMetadata.PartitionKey[0].Name != "first_id" {
+		t.Errorf("Expected the first partition key column to be 'first_id' but was '%s'", tableMetadata.PartitionKey[0].Name)
+	}
+	if len(tableMetadata.ClusteringColumns) != 1 {
+		t.Fatalf("expected clustering columns length of 1, but was %d", len(tableMetadata.ClusteringColumns))
+	}
+	for i, column := range tableMetadata.ClusteringColumns {
+		if column == nil {
+			t.Fatalf("clustering column metadata at index %d was nil", i)
+		}
+	}
+	if tableMetadata.ClusteringColumns[0].Name != "second_id" {
+		t.Errorf("Expected the first clustering column to be 'second_id' but was '%s'", tableMetadata.ClusteringColumns[0].Name)
+	}
+	thirdColumn, found := tableMetadata.Columns["third_id"]
+	if !found {
+		t.Fatalf("Expected a column definition for 'third_id'")
+	}
+	if !session.useSystemSchema && thirdColumn.Index.Name != "index_metadata" {
+		// TODO(zariel): scan index info from system_schema
+		t.Errorf("Expected column index named 'index_metadata' but was '%s'", 
thirdColumn.Index.Name) + } +} + +// Integration test of the routing key calculation +func TestRoutingKey(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, "CREATE TABLE gocql_test.test_single_routing_key (first_id int, second_id int, PRIMARY KEY (first_id, second_id))"); err != nil { + t.Fatalf("failed to create table with error '%v'", err) + } + if err := createTable(session, "CREATE TABLE gocql_test.test_composite_routing_key (first_id int, second_id int, PRIMARY KEY ((first_id, second_id)))"); err != nil { + t.Fatalf("failed to create table with error '%v'", err) + } + + routingKeyInfo, err := session.routingKeyInfo(context.Background(), "SELECT * FROM test_single_routing_key WHERE second_id=? AND first_id=?") + if err != nil { + t.Fatalf("failed to get routing key info due to error: %v", err) + } + if routingKeyInfo == nil { + t.Fatal("Expected routing key info, but was nil") + } + if len(routingKeyInfo.indexes) != 1 { + t.Fatalf("Expected routing key indexes length to be 1 but was %d", len(routingKeyInfo.indexes)) + } + if routingKeyInfo.indexes[0] != 1 { + t.Errorf("Expected routing key index[0] to be 1 but was %d", routingKeyInfo.indexes[0]) + } + if len(routingKeyInfo.types) != 1 { + t.Fatalf("Expected routing key types length to be 1 but was %d", len(routingKeyInfo.types)) + } + if routingKeyInfo.types[0] == nil { + t.Fatal("Expected routing key types[0] to be non-nil") + } + if routingKeyInfo.types[0].Type() != TypeInt { + t.Fatalf("Expected routing key types[0].Type to be %v but was %v", TypeInt, routingKeyInfo.types[0].Type()) + } + + // verify the cache is working + routingKeyInfo, err = session.routingKeyInfo(context.Background(), "SELECT * FROM test_single_routing_key WHERE second_id=? AND first_id=?") + if err != nil { + t.Fatalf("failed to get routing key info due to error: %v", err) + } + if len(routingKeyInfo.indexes) != 1 { + t.Fatalf("Expected routing key indexes length to be 1 but was %d", len(routingKeyInfo.indexes)) + } + if routingKeyInfo.indexes[0] != 1 { + t.Errorf("Expected routing key index[0] to be 1 but was %d", routingKeyInfo.indexes[0]) + } + if len(routingKeyInfo.types) != 1 { + t.Fatalf("Expected routing key types length to be 1 but was %d", len(routingKeyInfo.types)) + } + if routingKeyInfo.types[0] == nil { + t.Fatal("Expected routing key types[0] to be non-nil") + } + if routingKeyInfo.types[0].Type() != TypeInt { + t.Fatalf("Expected routing key types[0] to be %v but was %v", TypeInt, routingKeyInfo.types[0].Type()) + } + cacheSize := session.routingKeyInfoCache.lru.Len() + if cacheSize != 1 { + t.Errorf("Expected cache size to be 1 but was %d", cacheSize) + } + + query := session.Query("SELECT * FROM test_single_routing_key WHERE second_id=? AND first_id=?", 1, 2) + routingKey, err := query.GetRoutingKey() + if err != nil { + t.Fatalf("Failed to get routing key due to error: %v", err) + } + expectedRoutingKey := []byte{0, 0, 0, 2} + if !reflect.DeepEqual(expectedRoutingKey, routingKey) { + t.Errorf("Expected routing key %v but was %v", expectedRoutingKey, routingKey) + } + + routingKeyInfo, err = session.routingKeyInfo(context.Background(), "SELECT * FROM test_composite_routing_key WHERE second_id=? 
AND first_id=?") + if err != nil { + t.Fatalf("failed to get routing key info due to error: %v", err) + } + if routingKeyInfo == nil { + t.Fatal("Expected routing key info, but was nil") + } + if len(routingKeyInfo.indexes) != 2 { + t.Fatalf("Expected routing key indexes length to be 2 but was %d", len(routingKeyInfo.indexes)) + } + if routingKeyInfo.indexes[0] != 1 { + t.Errorf("Expected routing key index[0] to be 1 but was %d", routingKeyInfo.indexes[0]) + } + if routingKeyInfo.indexes[1] != 0 { + t.Errorf("Expected routing key index[1] to be 0 but was %d", routingKeyInfo.indexes[1]) + } + if len(routingKeyInfo.types) != 2 { + t.Fatalf("Expected routing key types length to be 1 but was %d", len(routingKeyInfo.types)) + } + if routingKeyInfo.types[0] == nil { + t.Fatal("Expected routing key types[0] to be non-nil") + } + if routingKeyInfo.types[0].Type() != TypeInt { + t.Fatalf("Expected routing key types[0] to be %v but was %v", TypeInt, routingKeyInfo.types[0].Type()) + } + if routingKeyInfo.types[1] == nil { + t.Fatal("Expected routing key types[1] to be non-nil") + } + if routingKeyInfo.types[1].Type() != TypeInt { + t.Fatalf("Expected routing key types[0] to be %v but was %v", TypeInt, routingKeyInfo.types[1].Type()) + } + + query = session.Query("SELECT * FROM test_composite_routing_key WHERE second_id=? AND first_id=?", 1, 2) + routingKey, err = query.GetRoutingKey() + if err != nil { + t.Fatalf("Failed to get routing key due to error: %v", err) + } + expectedRoutingKey = []byte{0, 4, 0, 0, 0, 2, 0, 0, 4, 0, 0, 0, 1, 0} + if !reflect.DeepEqual(expectedRoutingKey, routingKey) { + t.Errorf("Expected routing key %v but was %v", expectedRoutingKey, routingKey) + } + + // verify the cache is working + cacheSize = session.routingKeyInfoCache.lru.Len() + if cacheSize != 2 { + t.Errorf("Expected cache size to be 2 but was %d", cacheSize) + } +} + +// Integration test of the token-aware policy-based connection pool +func TestTokenAwareConnPool(t *testing.T) { + cluster := createCluster() + cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy()) + + // force metadata query to page + cluster.PageSize = 1 + + session := createSessionFromCluster(cluster, t) + defer session.Close() + + expectedPoolSize := cluster.NumConns * len(session.ring.allHosts()) + + // wait for pool to fill + for i := 0; i < 10; i++ { + if session.pool.Size() == expectedPoolSize { + break + } + time.Sleep(100 * time.Millisecond) + } + + if expectedPoolSize != session.pool.Size() { + t.Errorf("Expected pool size %d but was %d", expectedPoolSize, session.pool.Size()) + } + + // add another cf so there are two pages when fetching table metadata from our keyspace + if err := createTable(session, "CREATE TABLE gocql_test.test_token_aware_other_cf (id int, data text, PRIMARY KEY (id))"); err != nil { + t.Fatalf("failed to create test_token_aware table with err: %v", err) + } + + if err := createTable(session, "CREATE TABLE gocql_test.test_token_aware (id int, data text, PRIMARY KEY (id))"); err != nil { + t.Fatalf("failed to create test_token_aware table with err: %v", err) + } + query := session.Query("INSERT INTO test_token_aware (id, data) VALUES (?,?)", 42, "8 * 6 =") + if err := query.Exec(); err != nil { + t.Fatalf("failed to insert with err: %v", err) + } + + query = session.Query("SELECT data FROM test_token_aware where id = ?", 42).Consistency(One) + var data string + if err := query.Scan(&data); err != nil { + t.Error(err) + } + + // TODO add verification that the query went to the 
correct host +} + +func TestNegativeStream(t *testing.T) { + session := createSession(t) + defer session.Close() + + conn := getRandomConn(t, session) + + const stream = -50 + writer := frameWriterFunc(func(f *framer, streamID int) error { + f.writeHeader(0, opOptions, stream) + return f.finishWrite() + }) + + frame, err := conn.exec(context.Background(), writer, nil) + if err == nil { + t.Fatalf("expected to get an error on stream %d", stream) + } else if frame != nil { + t.Fatalf("expected to get nil frame got %+v", frame) + } +} + +func TestManualQueryPaging(t *testing.T) { + const rowsToInsert = 5 + + session := createSession(t) + defer session.Close() + + if err := createTable(session, "CREATE TABLE gocql_test.testManualPaging (id int, count int, PRIMARY KEY (id))"); err != nil { + t.Fatal(err) + } + + for i := 0; i < rowsToInsert; i++ { + err := session.Query("INSERT INTO testManualPaging(id, count) VALUES(?, ?)", i, i*i).Exec() + if err != nil { + t.Fatal(err) + } + } + + // disable auto paging, 1 page per iteration + query := session.Query("SELECT id, count FROM testManualPaging").PageState(nil).PageSize(2) + var id, count, fetched int + + iter := query.Iter() + // NOTE: this isn't very indicative of how it should be used; the idea is that + // the page state is returned to some client, which will send it back to manually + // page through the results. + for { + for iter.Scan(&id, &count) { + if count != (id * id) { + t.Fatalf("got wrong value from iteration: got %d expected %d", count, id*id) + } + + fetched++ + } + + if len(iter.PageState()) > 0 { + // more pages + iter = query.PageState(iter.PageState()).Iter() + } else { + break + } + } + + if err := iter.Close(); err != nil { + t.Fatal(err) + } + + if fetched != rowsToInsert { + t.Fatalf("expected to fetch %d rows got %d", rowsToInsert, fetched) + } +} + +func TestLexicalUUIDType(t *testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.test_lexical_uuid ( + key varchar, + column1 'org.apache.cassandra.db.marshal.LexicalUUIDType', + value int, + PRIMARY KEY (key, column1) + )`); err != nil { + t.Fatal("create:", err) + } + + key := TimeUUID().String() + column1 := TimeUUID() + + err := session.Query("INSERT INTO test_lexical_uuid(key, column1, value) VALUES(?, ?, ?)", key, column1, 55).Exec() + if err != nil { + t.Fatal(err) + } + + var gotUUID UUID + if err := session.Query("SELECT column1 from test_lexical_uuid where key = ? 
AND column1 = ?", key, column1).Scan(&gotUUID); err != nil { + t.Fatal(err) + } + + if gotUUID != column1 { + t.Errorf("got %s, expected %s", gotUUID, column1) + } +} + +// Issue 475 +func TestSessionBindRoutingKey(t *testing.T) { + cluster := createCluster() + cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy()) + + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.test_bind_routing_key ( + key varchar, + value int, + PRIMARY KEY (key) + )`); err != nil { + + t.Fatal(err) + } + + const ( + key = "routing-key" + value = 5 + ) + + fn := func(info *QueryInfo) ([]interface{}, error) { + return []interface{}{key, value}, nil + } + + q := session.Bind("INSERT INTO test_bind_routing_key(key, value) VALUES(?, ?)", fn) + if err := q.Exec(); err != nil { + t.Fatal(err) + } +} + +func TestJSONSupport(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < 4 { + t.Skip("skipping JSON support on proto < 4") + } + + if err := createTable(session, `CREATE TABLE gocql_test.test_json ( + id text PRIMARY KEY, + age int, + state text + )`); err != nil { + + t.Fatal(err) + } + + err := session.Query("INSERT INTO test_json JSON ?", `{"id": "user123", "age": 42, "state": "TX"}`).Exec() + if err != nil { + t.Fatal(err) + } + + var ( + id string + age int + state string + ) + + err = session.Query("SELECT id, age, state FROM test_json WHERE id = ?", "user123").Scan(&id, &age, &state) + if err != nil { + t.Fatal(err) + } + + if id != "user123" { + t.Errorf("got id %q expected %q", id, "user123") + } + if age != 42 { + t.Errorf("got age %d expected %d", age, 42) + } + if state != "TX" { + t.Errorf("got state %q expected %q", state, "TX") + } +} + +func TestUDF(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < 4 { + t.Skip("skipping UDF support on proto < 4") + } + + const query = `CREATE OR REPLACE FUNCTION uniq(state set<text>, val text) + CALLED ON NULL INPUT RETURNS set<text> LANGUAGE java + AS 'state.add(val); return state;'` + + err := session.Query(query).Exec() + if err != nil { + t.Fatal(err) + } +} + +func TestDiscoverViaProxy(t *testing.T) { + // This (complicated) test verifies that when the driver is given an initial host + // that is in fact a proxy, it discovers the rest of the ring behind the proxy + // and does not store the proxy's address as a host in its connection pool. + // See https://github.com/gocql/gocql/issues/481 + proxy, err := net.Listen("tcp", "localhost:0") + if err != nil { + t.Fatalf("unable to create proxy listener: %v", err) + } + + var ( + wg sync.WaitGroup + mu sync.Mutex + proxyConns []net.Conn + closed bool + ) + + go func(wg *sync.WaitGroup) { + cassandraAddr := JoinHostPort(clusterHosts[0], 9042) + + cassandra := func() (net.Conn, error) { + return net.Dial("tcp", cassandraAddr) + } + + proxyFn := func(wg *sync.WaitGroup, from, to net.Conn) { + defer wg.Done() + + _, err := io.Copy(to, from) + if err != nil { + mu.Lock() + if !closed { + t.Error(err) + } + mu.Unlock() + } + } + + // handle dials cassandra and then proxies requests and responses. It waits + // for both the read and write side of the TCP connection to close before + // returning. 
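+ // Both the dialed (cassandra-side) and accepted (client-side) connections are + // recorded in proxyConns so the teardown at the end of the test can force-close them.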
+ handle := func(conn net.Conn) error { + defer conn.Close() + + cass, err := cassandra() + if err != nil { + return err + } + + mu.Lock() + proxyConns = append(proxyConns, cass) + mu.Unlock() + + defer cass.Close() + + var wg sync.WaitGroup + wg.Add(1) + go proxyFn(&wg, conn, cass) + + wg.Add(1) + go proxyFn(&wg, cass, conn) + + wg.Wait() + + return nil + } + + for { + // proxy just accepts connections and then proxies them to cassandra, + // it runs until it is closed. + conn, err := proxy.Accept() + if err != nil { + mu.Lock() + if !closed { + t.Error(err) + } + mu.Unlock() + return + } + + mu.Lock() + proxyConns = append(proxyConns, conn) + mu.Unlock() + + wg.Add(1) + go func(conn net.Conn) { + defer wg.Done() + + if err := handle(conn); err != nil { + t.Error(err) + return + } + }(conn) + } + }(&wg) + + defer wg.Wait() + + proxyAddr := proxy.Addr().String() + + cluster := createCluster() + cluster.NumConns = 1 + // initial host is the proxy address + cluster.Hosts = []string{proxyAddr} + + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if session.hostSource.localHost.BroadcastAddress() == nil { + t.Skip("Target cluster does not have broadcast_address in system.local.") + goto close + } + + // we shouldn't need this, but to be safe + time.Sleep(1 * time.Second) + + session.pool.mu.RLock() + for _, host := range clusterHosts { + if _, ok := session.pool.hostConnPools[host]; !ok { + t.Errorf("missing host in pool after discovery: %q", host) + } + } + session.pool.mu.RUnlock() + +close: + mu.Lock() + closed = true + if err := proxy.Close(); err != nil { + t.Log(err) + } + + for _, conn := range proxyConns { + if err := conn.Close(); err != nil { + t.Log(err) + } + } + mu.Unlock() +} + +func TestUnmarshallNestedTypes(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("can not have frozen types in cassandra < 2.1.3") + } + + if err := createTable(session, `CREATE TABLE gocql_test.test_557 ( + id text PRIMARY KEY, + val list<frozen<map<text, text>>> + )`); err != nil { + + t.Fatal(err) + } + + m := []map[string]string{ + {"key1": "val1"}, + {"key2": "val2"}, + } + + const id = "key" + err := session.Query("INSERT INTO test_557(id, val) VALUES(?, ?)", id, m).Exec() + if err != nil { + t.Fatal(err) + } + + var data []map[string]string + if err := session.Query("SELECT val FROM test_557 WHERE id = ?", id).Scan(&data); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(data, m) { + t.Fatalf("%+#v != %+#v", data, m) + } +} + +func TestSchemaReset(t *testing.T) { + if flagCassVersion.Major == 0 || (flagCassVersion.Before(2, 1, 3)) { + t.Skipf("skipping TestSchemaReset due to CASSANDRA-7910 in Cassandra <2.1.3 version=%v", flagCassVersion) + } + + cluster := createCluster() + cluster.NumConns = 1 + + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.test_schema_reset ( + id text PRIMARY KEY)`); err != nil { + + t.Fatal(err) + } + + const key = "test" + + err := session.Query("INSERT INTO test_schema_reset(id) VALUES(?)", key).Exec() + if err != nil { + t.Fatal(err) + } + + var id string + err = session.Query("SELECT * FROM test_schema_reset WHERE id=?", key).Scan(&id) + if err != nil { + t.Fatal(err) + } else if id != key { + t.Fatalf("expected to get id=%q got=%q", key, id) + } + + if err := createTable(session, `ALTER TABLE gocql_test.test_schema_reset ADD val text`); err != nil { + t.Fatal(err) + } + + const expVal = 
"test-val" + err = session.Query("INSERT INTO test_schema_reset(id, val) VALUES(?, ?)", key, expVal).Exec() + if err != nil { + t.Fatal(err) + } + + var val string + err = session.Query("SELECT * FROM test_schema_reset WHERE id=?", key).Scan(&id, &val) + if err != nil { + t.Fatal(err) + } + + if id != key { + t.Errorf("expected to get id=%q got=%q", key, id) + } + if val != expVal { + t.Errorf("expected to get val=%q got=%q", expVal, val) + } +} + +func TestCreateSession_DontSwallowError(t *testing.T) { + t.Skip("This test is bad, and the resultant error from cassandra changes between versions") + cluster := createCluster() + cluster.ProtoVersion = 0x100 + session, err := cluster.CreateSession() + if err == nil { + session.Close() + + t.Fatal("expected to get an error for unsupported protocol") + } + + if flagCassVersion.Major < 3 { + // TODO: we should get a distinct error type here which include the underlying + // cassandra error about the protocol version, for now check this here. + if !strings.Contains(err.Error(), "Invalid or unsupported protocol version") { + t.Fatalf(`expcted to get error "unsupported protocol version" got: %q`, err) + } + } else { + if !strings.Contains(err.Error(), "unsupported response version") { + t.Fatalf(`expcted to get error "unsupported response version" got: %q`, err) + } + } +} + +func TestControl_DiscoverProtocol(t *testing.T) { + cluster := createCluster() + cluster.ProtoVersion = 0 + + session, err := cluster.CreateSession() + if err != nil { + t.Fatal(err) + } + defer session.Close() + + if session.cfg.ProtoVersion == 0 { + t.Fatal("did not discovery protocol") + } +} + +// TestUnsetCol verify unset column will not replace an existing column +func TestUnsetCol(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < 4 { + t.Skip("Unset Values are not supported in protocol < 4") + } + + if err := createTable(session, "CREATE TABLE gocql_test.testUnsetInsert (id int, my_int int, my_text text, PRIMARY KEY (id))"); err != nil { + t.Fatalf("failed to create table with error '%v'", err) + } + if err := session.Query("INSERT INTO testUnSetInsert (id,my_int,my_text) VALUES (?,?,?)", 1, 2, "3").Exec(); err != nil { + t.Fatalf("failed to insert with err: %v", err) + } + if err := session.Query("INSERT INTO testUnSetInsert (id,my_int,my_text) VALUES (?,?,?)", 1, UnsetValue, UnsetValue).Exec(); err != nil { + t.Fatalf("failed to insert with err: %v", err) + } + + var id, mInt int + var mText string + + if err := session.Query("SELECT id, my_int ,my_text FROM testUnsetInsert").Scan(&id, &mInt, &mText); err != nil { + t.Fatalf("failed to select with err: %v", err) + } else if id != 1 || mInt != 2 || mText != "3" { + t.Fatalf("Expected results: 1, 2, \"3\", got %v, %v, %v", id, mInt, mText) + } +} + +// TestUnsetColBatch verify unset column will not replace a column in batch +func TestUnsetColBatch(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < 4 { + t.Skip("Unset Values are not supported in protocol < 4") + } + + if err := createTable(session, "CREATE TABLE gocql_test.batchUnsetInsert (id int, my_int int, my_text text, PRIMARY KEY (id))"); err != nil { + t.Fatalf("failed to create table with error '%v'", err) + } + + b := session.NewBatch(LoggedBatch) + b.Query("INSERT INTO gocql_test.batchUnsetInsert(id, my_int, my_text) VALUES (?,?,?)", 1, 1, UnsetValue) + b.Query("INSERT INTO gocql_test.batchUnsetInsert(id, my_int, my_text) VALUES (?,?,?)", 1, UnsetValue, "") 
+ b.Query("INSERT INTO gocql_test.batchUnsetInsert(id, my_int, my_text) VALUES (?,?,?)", 2, 2, UnsetValue) + + if err := session.ExecuteBatch(b); err != nil { + t.Fatalf("query failed. %v", err) + } else { + if b.Attempts() < 1 { + t.Fatal("expected at least 1 attempt, but got 0") + } + if b.Latency() <= 0 { + t.Fatalf("expected latency to be greater than 0, but got %v instead.", b.Latency()) + } + } + var id, mInt, count int + var mText string + + if err := session.Query("SELECT count(*) FROM gocql_test.batchUnsetInsert;").Scan(&count); err != nil { + t.Fatalf("Failed to select with err: %v", err) + } else if count != 2 { + t.Fatalf("Expected Batch Insert count 2, got %v", count) + } + + if err := session.Query("SELECT id, my_int ,my_text FROM gocql_test.batchUnsetInsert where id=1;").Scan(&id, &mInt, &mText); err != nil { + t.Fatalf("failed to select with err: %v", err) + } else if id != mInt { + t.Fatalf("expected id, my_int to be 1, got %v and %v", id, mInt) + } +} diff --git a/vendor/github.com/gocql/gocql/cluster_test.go b/vendor/github.com/gocql/gocql/cluster_test.go new file mode 100644 index 0000000000..10e557b3b9 --- /dev/null +++ b/vendor/github.com/gocql/gocql/cluster_test.go @@ -0,0 +1,53 @@ +package gocql + +import ( + "testing" + "time" + "net" +) + +func TestNewCluster_Defaults(t *testing.T) { + cfg := NewCluster() + assertEqual(t, "cluster config cql version", "3.0.0", cfg.CQLVersion) + assertEqual(t, "cluster config timeout", 600*time.Millisecond, cfg.Timeout) + assertEqual(t, "cluster config port", 9042, cfg.Port) + assertEqual(t, "cluster config num-conns", 2, cfg.NumConns) + assertEqual(t, "cluster config consistency", Quorum, cfg.Consistency) + assertEqual(t, "cluster config max prepared statements", defaultMaxPreparedStmts, cfg.MaxPreparedStmts) + assertEqual(t, "cluster config max routing key info", 1000, cfg.MaxRoutingKeyInfo) + assertEqual(t, "cluster config page-size", 5000, cfg.PageSize) + assertEqual(t, "cluster config default timestamp", true, cfg.DefaultTimestamp) + assertEqual(t, "cluster config max wait schema agreement", 60*time.Second, cfg.MaxWaitSchemaAgreement) + assertEqual(t, "cluster config reconnect interval", 60*time.Second, cfg.ReconnectInterval) +} + +func TestNewCluster_WithHosts(t *testing.T) { + cfg := NewCluster("addr1", "addr2") + assertEqual(t, "cluster config hosts length", 2, len(cfg.Hosts)) + assertEqual(t, "cluster config host 0", "addr1", cfg.Hosts[0]) + assertEqual(t, "cluster config host 1", "addr2", cfg.Hosts[1]) +} + +func TestClusterConfig_translateAddressAndPort_NilTranslator(t *testing.T) { + cfg := NewCluster() + assertNil(t, "cluster config address translator", cfg.AddressTranslator) + newAddr, newPort := cfg.translateAddressPort(net.ParseIP("10.0.0.1"), 1234) + assertTrue(t, "same address as provided", net.ParseIP("10.0.0.1").Equal(newAddr)) + assertEqual(t, "translated host and port", 1234, newPort) +} + +func TestClusterConfig_translateAddressAndPort_EmptyAddr(t *testing.T) { + cfg := NewCluster() + cfg.AddressTranslator = staticAddressTranslator(net.ParseIP("10.10.10.10"), 5432) + newAddr, newPort := cfg.translateAddressPort(net.IP([]byte{}), 0) + assertTrue(t, "translated address is still empty", len(newAddr) == 0) + assertEqual(t, "translated port", 0, newPort) +} + +func TestClusterConfig_translateAddressAndPort_Success(t *testing.T) { + cfg := NewCluster() + cfg.AddressTranslator = staticAddressTranslator(net.ParseIP("10.10.10.10"), 5432) + newAddr, newPort := cfg.translateAddressPort(net.ParseIP("10.0.0.1"), 2345) + 
assertTrue(t, "translated address", net.ParseIP("10.10.10.10").Equal(newAddr)) + assertEqual(t, "translated port", 5432, newPort) +} diff --git a/vendor/github.com/gocql/gocql/common_test.go b/vendor/github.com/gocql/gocql/common_test.go new file mode 100644 index 0000000000..724863785f --- /dev/null +++ b/vendor/github.com/gocql/gocql/common_test.go @@ -0,0 +1,196 @@ +package gocql + +import ( + "flag" + "fmt" + "log" + "net" + "strings" + "sync" + "testing" + "time" +) + +var ( + flagCluster = flag.String("cluster", "127.0.0.1", "a comma-separated list of host:port tuples") + flagProto = flag.Int("proto", 0, "protcol version") + flagCQL = flag.String("cql", "3.0.0", "CQL version") + flagRF = flag.Int("rf", 1, "replication factor for test keyspace") + clusterSize = flag.Int("clusterSize", 1, "the expected size of the cluster") + flagRetry = flag.Int("retries", 5, "number of times to retry queries") + flagAutoWait = flag.Duration("autowait", 1000*time.Millisecond, "time to wait for autodiscovery to fill the hosts poll") + flagRunSslTest = flag.Bool("runssl", false, "Set to true to run ssl test") + flagRunAuthTest = flag.Bool("runauth", false, "Set to true to run authentication test") + flagCompressTest = flag.String("compressor", "", "compressor to use") + flagTimeout = flag.Duration("gocql.timeout", 5*time.Second, "sets the connection `timeout` for all operations") + + flagCassVersion cassVersion + clusterHosts []string +) + +func init() { + flag.Var(&flagCassVersion, "gocql.cversion", "the cassandra version being tested against") + + flag.Parse() + clusterHosts = strings.Split(*flagCluster, ",") + log.SetFlags(log.Lshortfile | log.LstdFlags) +} + +func addSslOptions(cluster *ClusterConfig) *ClusterConfig { + if *flagRunSslTest { + cluster.SslOpts = &SslOptions{ + CertPath: "testdata/pki/gocql.crt", + KeyPath: "testdata/pki/gocql.key", + CaPath: "testdata/pki/ca.crt", + EnableHostVerification: false, + } + } + return cluster +} + +var initOnce sync.Once + +func createTable(s *Session, table string) error { + // lets just be really sure + if err := s.control.awaitSchemaAgreement(); err != nil { + log.Printf("error waiting for schema agreement pre create table=%q err=%v\n", table, err) + return err + } + + if err := s.Query(table).RetryPolicy(nil).Exec(); err != nil { + log.Printf("error creating table table=%q err=%v\n", table, err) + return err + } + + if err := s.control.awaitSchemaAgreement(); err != nil { + log.Printf("error waiting for schema agreement post create table=%q err=%v\n", table, err) + return err + } + + return nil +} + +func createCluster() *ClusterConfig { + cluster := NewCluster(clusterHosts...) 
+ cluster.ProtoVersion = *flagProto + cluster.CQLVersion = *flagCQL + cluster.Timeout = *flagTimeout + cluster.Consistency = Quorum + cluster.MaxWaitSchemaAgreement = 2 * time.Minute // travis might be slow + if *flagRetry > 0 { + cluster.RetryPolicy = &SimpleRetryPolicy{NumRetries: *flagRetry} + } + + switch *flagCompressTest { + case "snappy": + cluster.Compressor = &SnappyCompressor{} + case "": + default: + panic("invalid compressor: " + *flagCompressTest) + } + + cluster = addSslOptions(cluster) + return cluster +} + +func createKeyspace(tb testing.TB, cluster *ClusterConfig, keyspace string) { + c := *cluster + c.Keyspace = "system" + c.Timeout = 30 * time.Second + session, err := c.CreateSession() + if err != nil { + panic(err) + } + defer session.Close() + defer tb.Log("closing keyspace session") + + err = createTable(session, `DROP KEYSPACE IF EXISTS `+keyspace) + if err != nil { + panic(fmt.Sprintf("unable to drop keyspace: %v", err)) + } + + err = createTable(session, fmt.Sprintf(`CREATE KEYSPACE %s + WITH replication = { + 'class' : 'SimpleStrategy', + 'replication_factor' : %d + }`, keyspace, *flagRF)) + + if err != nil { + panic(fmt.Sprintf("unable to create keyspace: %v", err)) + } +} + +func createSessionFromCluster(cluster *ClusterConfig, tb testing.TB) *Session { + // Drop and re-create the keyspace once. Different tests should use their own + // individual tables, but can assume that the table does not exist before. + initOnce.Do(func() { + createKeyspace(tb, cluster, "gocql_test") + }) + + cluster.Keyspace = "gocql_test" + session, err := cluster.CreateSession() + if err != nil { + tb.Fatal("createSession:", err) + } + + if err := session.control.awaitSchemaAgreement(); err != nil { + tb.Fatal(err) + } + + return session +} + +func createSession(tb testing.TB) *Session { + cluster := createCluster() + return createSessionFromCluster(cluster, tb) +} + +// createTestSession is hopefully moderately useful in actual unit tests +func createTestSession() *Session { + config := NewCluster() + config.NumConns = 1 + config.Timeout = 0 + config.DisableInitialHostLookup = true + config.IgnorePeerAddr = true + config.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy() + session := &Session{ + cfg: *config, + connCfg: &ConnConfig{ + Timeout: 10 * time.Millisecond, + Keepalive: 0, + }, + policy: config.PoolConfig.HostSelectionPolicy, + } + session.pool = config.PoolConfig.buildPool(session) + return session +} + +func staticAddressTranslator(newAddr net.IP, newPort int) AddressTranslator { + return AddressTranslatorFunc(func(addr net.IP, port int) (net.IP, int) { + return newAddr, newPort + }) +} + +func assertTrue(t *testing.T, description string, value bool) { + if !value { + t.Errorf("expected %s to be true", description) + } +} + +func assertEqual(t *testing.T, description string, expected, actual interface{}) { + if expected != actual { + t.Errorf("expected %s to be (%+v) but was (%+v) instead", description, expected, actual) + } +} + +func assertNil(t *testing.T, description string, actual interface{}) { + if actual != nil { + t.Errorf("expected %s to be (nil) but was (%+v) instead", description, actual) + } +} + +func assertNotNil(t *testing.T, description string, actual interface{}) { + if actual == nil { + t.Errorf("expected %s not to be (nil)", description) + } +} diff --git a/vendor/github.com/gocql/gocql/compressor_test.go b/vendor/github.com/gocql/gocql/compressor_test.go new file mode 100644 index 0000000000..cbf16a4687 --- /dev/null +++ 
b/vendor/github.com/gocql/gocql/compressor_test.go @@ -0,0 +1,40 @@ +// +build all unit + +package gocql + +import ( + "bytes" + "testing" + + "github.com/golang/snappy" +) + +func TestSnappyCompressor(t *testing.T) { + c := SnappyCompressor{} + if c.Name() != "snappy" { + t.Fatalf("expected name to be 'snappy', got %v", c.Name()) + } + + str := "My Test String" + //Test Encoding + expected := snappy.Encode(nil, []byte(str)) + if res, err := c.Encode([]byte(str)); err != nil { + t.Fatalf("failed to encode '%v' with error %v", str, err) + } else if bytes.Compare(expected, res) != 0 { + t.Fatal("failed to match the expected encoded value with the result encoded value.") + } + + val, err := c.Encode([]byte(str)) + if err != nil { + t.Fatalf("failed to encode '%v' with error '%v'", str, err) + } + + //Test Decoding + if expected, err := snappy.Decode(nil, val); err != nil { + t.Fatalf("failed to decode '%v' with error %v", val, err) + } else if res, err := c.Decode(val); err != nil { + t.Fatalf("failed to decode '%v' with error %v", val, err) + } else if bytes.Compare(expected, res) != 0 { + t.Fatal("failed to match the expected decoded value with the result decoded value.") + } +} diff --git a/vendor/github.com/gocql/gocql/conn_test.go b/vendor/github.com/gocql/gocql/conn_test.go new file mode 100644 index 0000000000..69b2569784 --- /dev/null +++ b/vendor/github.com/gocql/gocql/conn_test.go @@ -0,0 +1,935 @@ +// Copyright (c) 2012 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +build all unit + +package gocql + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net" + "strings" + "sync" + "sync/atomic" + "testing" + "time" +) + +const ( + defaultProto = protoVersion2 +) + +func TestApprove(t *testing.T) { + tests := map[bool]bool{ + approve("org.apache.cassandra.auth.PasswordAuthenticator"): true, + approve("com.instaclustr.cassandra.auth.SharedSecretAuthenticator"): true, + approve("com.datastax.bdp.cassandra.auth.DseAuthenticator"): true, + approve("com.apache.cassandra.auth.FakeAuthenticator"): false, + } + for k, v := range tests { + if k != v { + t.Fatalf("expected '%v', got '%v'", k, v) + } + } +} + +func TestJoinHostPort(t *testing.T) { + tests := map[string]string{ + "127.0.0.1:0": JoinHostPort("127.0.0.1", 0), + "127.0.0.1:1": JoinHostPort("127.0.0.1:1", 9142), + "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:0": JoinHostPort("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 0), + "[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1": JoinHostPort("[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:1", 9142), + } + for k, v := range tests { + if k != v { + t.Fatalf("expected '%v', got '%v'", k, v) + } + } +} + +func testCluster(addr string, proto protoVersion) *ClusterConfig { + cluster := NewCluster(addr) + cluster.ProtoVersion = int(proto) + cluster.disableControlConn = true + return cluster +} + +func TestSimple(t *testing.T) { + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := testCluster(srv.Address, defaultProto) + db, err := cluster.CreateSession() + if err != nil { + t.Fatalf("0x%x: NewCluster: %v", defaultProto, err) + } + + if err := db.Query("void").Exec(); err != nil { + t.Fatalf("0x%x: %v", defaultProto, err) + } +} + +func TestSSLSimple(t *testing.T) { + srv := NewSSLTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + db, err := createTestSslCluster(srv.Address, defaultProto, true).CreateSession() 
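+ // (the third argument asks createTestSslCluster, defined below, to present a client certificate)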
+ if err != nil { + t.Fatalf("0x%x: NewCluster: %v", defaultProto, err) + } + + if err := db.Query("void").Exec(); err != nil { + t.Fatalf("0x%x: %v", defaultProto, err) + } +} + +func TestSSLSimpleNoClientCert(t *testing.T) { + srv := NewSSLTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + db, err := createTestSslCluster(srv.Address, defaultProto, false).CreateSession() + if err != nil { + t.Fatalf("0x%x: NewCluster: %v", defaultProto, err) + } + + if err := db.Query("void").Exec(); err != nil { + t.Fatalf("0x%x: %v", defaultProto, err) + } +} + +func createTestSslCluster(addr string, proto protoVersion, useClientCert bool) *ClusterConfig { + cluster := testCluster(addr, proto) + sslOpts := &SslOptions{ + CaPath: "testdata/pki/ca.crt", + EnableHostVerification: false, + } + + if useClientCert { + sslOpts.CertPath = "testdata/pki/gocql.crt" + sslOpts.KeyPath = "testdata/pki/gocql.key" + } + + cluster.SslOpts = sslOpts + return cluster +} + +func TestClosed(t *testing.T) { + t.Skip("Skipping the execution of TestClosed for now to try to concentrate on more important test failures on Travis") + + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + session, err := newTestSession(srv.Address, defaultProto) + if err != nil { + t.Fatalf("0x%x: NewCluster: %v", defaultProto, err) + } + + session.Close() + + if err := session.Query("void").Exec(); err != ErrSessionClosed { + t.Fatalf("0x%x: expected %#v, got %#v", defaultProto, ErrSessionClosed, err) + } +} + +func newTestSession(addr string, proto protoVersion) (*Session, error) { + return testCluster(addr, proto).CreateSession() +} + +func TestDNSLookupConnected(t *testing.T) { + log := &testLogger{} + Logger = log + defer func() { + Logger = &defaultLogger{} + }() + + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := NewCluster("cassandra1.invalid", srv.Address, "cassandra2.invalid") + cluster.ProtoVersion = int(defaultProto) + cluster.disableControlConn = true + + // CreateSession() should attempt to resolve the DNS name "cassandraX.invalid" + // and fail, but continue to connect via srv.Address + _, err := cluster.CreateSession() + if err != nil { + t.Fatal("CreateSession() should have connected") + } + + if !strings.Contains(log.String(), "gocql: dns error") { + t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String()) + } +} + +func TestDNSLookupError(t *testing.T) { + log := &testLogger{} + Logger = log + defer func() { + Logger = &defaultLogger{} + }() + + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := NewCluster("cassandra1.invalid", "cassandra2.invalid") + cluster.ProtoVersion = int(defaultProto) + cluster.disableControlConn = true + + // CreateSession() should attempt to resolve each DNS name "cassandraX.invalid" + // and fail since it could not resolve any dns entries + _, err := cluster.CreateSession() + if err == nil { + t.Fatal("CreateSession() should have returned an error") + } + + if !strings.Contains(log.String(), "gocql: dns error") { + t.Fatalf("Expected to receive dns error log message - got '%s' instead", log.String()) + } + + if err.Error() != "gocql: unable to create session: failed to resolve any of the provided hostnames" { + t.Fatalf("Expected CreateSession() to fail with message - got '%s' instead", err.Error()) + } +} + +func TestStartupTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + log := &testLogger{} + 
Logger = log + defer func() { + Logger = &defaultLogger{} + }() + + srv := NewTestServer(t, defaultProto, ctx) + defer srv.Stop() + + // Tell the server to never respond to Startup frame + atomic.StoreInt32(&srv.TimeoutOnStartup, 1) + + startTime := time.Now() + cluster := NewCluster(srv.Address) + cluster.ProtoVersion = int(defaultProto) + cluster.disableControlConn = true + // Set very long query connection timeout + // so we know CreateSession() is using the ConnectTimeout + cluster.Timeout = time.Second * 5 + + // Create session should timeout during connect attempt + _, err := cluster.CreateSession() + if err == nil { + t.Fatal("CreateSession() should have returned a timeout error") + } + + elapsed := time.Since(startTime) + if elapsed > time.Second*5 { + t.Fatal("ConnectTimeout is not respected") + } + + if !strings.Contains(err.Error(), "no connections were made when creating the session") { + t.Fatalf("Expected to receive no connections error - got '%s'", err) + } + + if !strings.Contains(log.String(), "no response to connection startup within timeout") { + t.Fatalf("Expected to receive timeout log message - got '%s'", log.String()) + } + + cancel() +} + +func TestTimeout(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + srv := NewTestServer(t, defaultProto, ctx) + defer srv.Stop() + + db, err := newTestSession(srv.Address, defaultProto) + if err != nil { + t.Fatalf("NewCluster: %v", err) + } + defer db.Close() + + var wg sync.WaitGroup + wg.Add(1) + + go func() { + defer wg.Done() + + select { + case <-time.After(5 * time.Second): + t.Errorf("no timeout") + case <-ctx.Done(): + } + }() + + if err := db.Query("kill").WithContext(ctx).Exec(); err == nil { + t.Fatal("expected error got nil") + } + cancel() + + wg.Wait() +} + +// TestQueryRetry will test to make sure that gocql will execute +// the exact amount of retry queries designated by the user. +func TestQueryRetry(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + srv := NewTestServer(t, defaultProto, ctx) + defer srv.Stop() + + db, err := newTestSession(srv.Address, defaultProto) + if err != nil { + t.Fatalf("NewCluster: %v", err) + } + defer db.Close() + + go func() { + select { + case <-ctx.Done(): + return + case <-time.After(5 * time.Second): + t.Errorf("no timeout") + } + }() + + rt := &SimpleRetryPolicy{NumRetries: 1} + + qry := db.Query("kill").RetryPolicy(rt) + if err := qry.Exec(); err == nil { + t.Fatalf("expected error") + } + + requests := atomic.LoadInt64(&srv.nKillReq) + attempts := qry.Attempts() + if requests != int64(attempts) { + t.Fatalf("expected requests %v to match query attempts %v", requests, attempts) + } + + // the query will only be attempted once, but is being retried + if requests != int64(rt.NumRetries) { + t.Fatalf("failed to retry the query %v time(s). 
Query executed %v times", rt.NumRetries, requests-1) + } +} + +func TestStreams_Protocol1(t *testing.T) { + srv := NewTestServer(t, protoVersion1, context.Background()) + defer srv.Stop() + + // TODO: these are more like session tests and should instead operate + // on a single Conn + cluster := testCluster(srv.Address, protoVersion1) + cluster.NumConns = 1 + cluster.ProtoVersion = 1 + + db, err := cluster.CreateSession() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + var wg sync.WaitGroup + for i := 1; i < 128; i++ { + // here were just validating that if we send NumStream request we get + // a response for every stream and the lengths for the queries are set + // correctly. + wg.Add(1) + go func() { + defer wg.Done() + if err := db.Query("void").Exec(); err != nil { + t.Error(err) + } + }() + } + wg.Wait() +} + +func TestStreams_Protocol3(t *testing.T) { + srv := NewTestServer(t, protoVersion3, context.Background()) + defer srv.Stop() + + // TODO: these are more like session tests and should instead operate + // on a single Conn + cluster := testCluster(srv.Address, protoVersion3) + cluster.NumConns = 1 + cluster.ProtoVersion = 3 + + db, err := cluster.CreateSession() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + for i := 1; i < 32768; i++ { + // the test server processes each conn synchronously + // here were just validating that if we send NumStream request we get + // a response for every stream and the lengths for the queries are set + // correctly. + if err = db.Query("void").Exec(); err != nil { + t.Fatal(err) + } + } +} + +func BenchmarkProtocolV3(b *testing.B) { + srv := NewTestServer(b, protoVersion3, context.Background()) + defer srv.Stop() + + // TODO: these are more like session tests and should instead operate + // on a single Conn + cluster := NewCluster(srv.Address) + cluster.NumConns = 1 + cluster.ProtoVersion = 3 + + db, err := cluster.CreateSession() + if err != nil { + b.Fatal(err) + } + defer db.Close() + + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + if err = db.Query("void").Exec(); err != nil { + b.Fatal(err) + } + } +} + +// This tests that the policy connection pool handles SSL correctly +func TestPolicyConnPoolSSL(t *testing.T) { + srv := NewSSLTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := createTestSslCluster(srv.Address, defaultProto, true) + cluster.PoolConfig.HostSelectionPolicy = RoundRobinHostPolicy() + + db, err := cluster.CreateSession() + if err != nil { + t.Fatalf("failed to create new session: %v", err) + } + + if err := db.Query("void").Exec(); err != nil { + t.Fatalf("query failed due to error: %v", err) + } + db.Close() + + // wait for the pool to drain + time.Sleep(100 * time.Millisecond) + size := db.pool.Size() + if size != 0 { + t.Fatalf("connection pool did not drain, still contains %d connections", size) + } +} + +func TestQueryTimeout(t *testing.T) { + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := testCluster(srv.Address, defaultProto) + // Set the timeout arbitrarily low so that the query hits the timeout in a + // timely manner. 
+ cluster.Timeout = 1 * time.Millisecond + + db, err := cluster.CreateSession() + if err != nil { + t.Fatalf("NewCluster: %v", err) + } + defer db.Close() + + ch := make(chan error, 1) + + go func() { + err := db.Query("timeout").Exec() + if err != nil { + ch <- err + return + } + t.Errorf("err was nil, expected to get a timeout after %v", db.cfg.Timeout) + }() + + select { + case err := <-ch: + if err != ErrTimeoutNoResponse { + t.Fatalf("expected to get %v for timeout got %v", ErrTimeoutNoResponse, err) + } + case <-time.After(10*time.Millisecond + db.cfg.Timeout): + // ensure that the query goroutines have been scheduled + t.Fatalf("query did not time out after %v", db.cfg.Timeout) + } +} + +func BenchmarkSingleConn(b *testing.B) { + srv := NewTestServer(b, 3, context.Background()) + defer srv.Stop() + + cluster := testCluster(srv.Address, 3) + // Use a moderate timeout so that a stalled query fails the benchmark promptly. + cluster.Timeout = 500 * time.Millisecond + cluster.NumConns = 1 + db, err := cluster.CreateSession() + if err != nil { + b.Fatalf("NewCluster: %v", err) + } + defer db.Close() + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := db.Query("void").Exec() + if err != nil { + b.Error(err) + return + } + } + }) +} + +func TestQueryTimeoutReuseStream(t *testing.T) { + t.Skip("no longer tests anything") + // TODO(zariel): move this to conn test, we really just want to check what + // happens when a conn is + + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := testCluster(srv.Address, defaultProto) + // Set the timeout arbitrarily low so that the query hits the timeout in a + // timely manner. + cluster.Timeout = 1 * time.Millisecond + cluster.NumConns = 1 + + db, err := cluster.CreateSession() + if err != nil { + t.Fatalf("NewCluster: %v", err) + } + defer db.Close() + + db.Query("slow").Exec() + + err = db.Query("void").Exec() + if err != nil { + t.Fatal(err) + } +} + +func TestQueryTimeoutClose(t *testing.T) { + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := testCluster(srv.Address, defaultProto) + // Set the timeout high enough that the explicit Close below, rather than + // the query timeout, is what aborts the in-flight query. 
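+ // (the test sleeps only 50ms before calling Close, well under the 1s timeout, + // so the expected error is ErrConnectionClosed)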
+ cluster.Timeout = 1000 * time.Millisecond + cluster.NumConns = 1 + + db, err := cluster.CreateSession() + if err != nil { + t.Fatalf("NewCluster: %v", err) + } + + ch := make(chan error) + go func() { + err := db.Query("timeout").Exec() + ch <- err + }() + // ensure that the above goroutine gets scheduled + time.Sleep(50 * time.Millisecond) + + db.Close() + select { + case err = <-ch: + case <-time.After(1 * time.Second): + t.Fatal("timed out waiting to get a response once cluster is closed") + } + + if err != ErrConnectionClosed { + t.Fatalf("expected to get %v got %v", ErrConnectionClosed, err) + } +} + +func TestStream0(t *testing.T) { + // TODO: replace this with type check + const expErr = "gocql: received unexpected frame on stream 0" + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + srv := NewTestServer(t, defaultProto, ctx) + defer srv.Stop() + + errorHandler := connErrorHandlerFn(func(conn *Conn, err error, closed bool) { + if !srv.isClosed() && !strings.HasPrefix(err.Error(), expErr) { + select { + case <-ctx.Done(): + return + default: + t.Errorf("expected to get error prefix %q got %q", expErr, err.Error()) + } + } + }) + + conn, err := Connect(srv.host(), &ConnConfig{ProtoVersion: int(srv.protocol)}, errorHandler, createTestSession()) + if err != nil { + t.Fatal(err) + } + + writer := frameWriterFunc(func(f *framer, streamID int) error { + f.writeQueryFrame(0, "void", &queryParams{}) + return f.finishWrite() + }) + + // need to write out an invalid frame, which we need a connection to do + framer, err := conn.exec(ctx, writer, nil) + if err == nil { + t.Fatal("expected to get an error on stream 0") + } else if !strings.HasPrefix(err.Error(), expErr) { + t.Fatalf("expected to get error prefix %q got %q", expErr, err.Error()) + } else if framer != nil { + frame, err := framer.parseFrame() + if err != nil { + t.Fatal(err) + } + t.Fatalf("got frame %v", frame) + } +} + +func TestConnClosedBlocked(t *testing.T) { + // issue 664 + const proto = 3 + + srv := NewTestServer(t, proto, context.Background()) + defer srv.Stop() + errorHandler := connErrorHandlerFn(func(conn *Conn, err error, closed bool) { + t.Log(err) + }) + + conn, err := Connect(srv.host(), &ConnConfig{ProtoVersion: int(srv.protocol)}, errorHandler, createTestSession()) + if err != nil { + t.Fatal(err) + } + + if err := conn.conn.Close(); err != nil { + t.Fatal(err) + } + + // This will block indefinitely if #664 is not fixed + err = conn.executeQuery(&Query{stmt: "void"}).Close() + if !strings.HasSuffix(err.Error(), "use of closed network connection") { + t.Fatalf("expected to get use of closed network connection error got: %v\n", err) + } +} + +func TestContext_Timeout(t *testing.T) { + srv := NewTestServer(t, defaultProto, context.Background()) + defer srv.Stop() + + cluster := testCluster(srv.Address, defaultProto) + cluster.Timeout = 5 * time.Second + db, err := cluster.CreateSession() + if err != nil { + t.Fatal(err) + } + defer db.Close() + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err = db.Query("timeout").WithContext(ctx).Exec() + if err != context.Canceled { + t.Fatalf("expected to get context cancel error: %v got %v", context.Canceled, err) + } +} + +func NewTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer { + laddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:0") + if err != nil { + t.Fatal(err) + } + + listen, err := net.ListenTCP("tcp", laddr) + if err != nil { + t.Fatal(err) + } + + headerSize := 8 + if protocol > 
protoVersion2 { + headerSize = 9 + } + + ctx, cancel := context.WithCancel(ctx) + srv := &TestServer{ + Address: listen.Addr().String(), + listen: listen, + t: t, + protocol: protocol, + headerSize: headerSize, + ctx: ctx, + cancel: cancel, + } + + go srv.closeWatch() + go srv.serve() + + return srv +} + +func NewSSLTestServer(t testing.TB, protocol uint8, ctx context.Context) *TestServer { + pem, err := ioutil.ReadFile("testdata/pki/ca.crt") + if err != nil { + t.Fatalf("could not read ca.crt: %v", err) + } + certPool := x509.NewCertPool() + if !certPool.AppendCertsFromPEM(pem) { + t.Fatalf("Failed parsing or appending certs") + } + mycert, err := tls.LoadX509KeyPair("testdata/pki/cassandra.crt", "testdata/pki/cassandra.key") + if err != nil { + t.Fatalf("could not load cert") + } + config := &tls.Config{ + Certificates: []tls.Certificate{mycert}, + RootCAs: certPool, + } + listen, err := tls.Listen("tcp", "127.0.0.1:0", config) + if err != nil { + t.Fatal(err) + } + + headerSize := 8 + if protocol > protoVersion2 { + headerSize = 9 + } + + ctx, cancel := context.WithCancel(ctx) + srv := &TestServer{ + Address: listen.Addr().String(), + listen: listen, + t: t, + protocol: protocol, + headerSize: headerSize, + ctx: ctx, + cancel: cancel, + } + + go srv.closeWatch() + go srv.serve() + return srv +} + +type TestServer struct { + Address string + TimeoutOnStartup int32 + t testing.TB + nreq uint64 + listen net.Listener + nKillReq int64 + compressor Compressor + + protocol byte + headerSize int + ctx context.Context + cancel context.CancelFunc + + quit chan struct{} + mu sync.Mutex + closed bool +} + +func (srv *TestServer) host() *HostInfo { + host, err := hostInfo(srv.Address, 9042) + if err != nil { + srv.t.Fatal(err) + } + return host +} + +func (srv *TestServer) closeWatch() { + <-srv.ctx.Done() + + srv.mu.Lock() + defer srv.mu.Unlock() + + srv.closeLocked() +} + +func (srv *TestServer) serve() { + defer srv.listen.Close() + for { + select { + case <-srv.ctx.Done(): + return + default: + } + + conn, err := srv.listen.Accept() + if err != nil { + break + } + + go func(conn net.Conn) { + defer conn.Close() + for { + select { + case <-srv.ctx.Done(): + return + default: + } + + framer, err := srv.readFrame(conn) + if err != nil { + if err == io.EOF { + return + } + + select { + case <-srv.ctx.Done(): + return + default: + } + + srv.t.Error(err) + return + } + + atomic.AddUint64(&srv.nreq, 1) + + go srv.process(framer) + } + }(conn) + } +} + +func (srv *TestServer) isClosed() bool { + srv.mu.Lock() + defer srv.mu.Unlock() + return srv.closed +} + +func (srv *TestServer) closeLocked() { + if srv.closed { + return + } + + srv.closed = true + + srv.listen.Close() + srv.cancel() +} + +func (srv *TestServer) Stop() { + srv.mu.Lock() + defer srv.mu.Unlock() + srv.closeLocked() +} + +func (srv *TestServer) process(f *framer) { + head := f.header + if head == nil { + select { + case <-srv.ctx.Done(): + return + default: + } + + srv.t.Error("process frame with a nil header") + return + } + + switch head.op { + case opStartup: + if atomic.LoadInt32(&srv.TimeoutOnStartup) > 0 { + // Do not respond to the startup command; + // wait until we get a cancel signal + select { + case <-srv.ctx.Done(): + return + } + } + f.writeHeader(0, opReady, head.stream) + case opOptions: + f.writeHeader(0, opSupported, head.stream) + f.writeShort(0) + case opQuery: + query := f.readLongString() + first := query + if n := strings.Index(query, " "); n > 0 { + first = first[:n] + } + switch strings.ToLower(first) { + case "kill": + atomic.AddInt64(&srv.nKillReq, 1) + f.writeHeader(0, opError, 
head.stream) + f.writeInt(0x1001) + f.writeString("query killed") + case "use": + f.writeInt(resultKindKeyspace) + f.writeString(strings.TrimSpace(query[3:])) + case "void": + f.writeHeader(0, opResult, head.stream) + f.writeInt(resultKindVoid) + case "timeout": + <-srv.ctx.Done() + return + case "slow": + go func() { + f.writeHeader(0, opResult, head.stream) + f.writeInt(resultKindVoid) + f.wbuf[0] = srv.protocol | 0x80 + select { + case <-srv.ctx.Done(): + return + case <-time.After(50 * time.Millisecond): + f.finishWrite() + } + }() + return + default: + f.writeHeader(0, opResult, head.stream) + f.writeInt(resultKindVoid) + } + case opError: + f.writeHeader(0, opError, head.stream) + f.wbuf = append(f.wbuf, f.rbuf...) + default: + f.writeHeader(0, opError, head.stream) + f.writeInt(0) + f.writeString("not supported") + } + + f.wbuf[0] = srv.protocol | 0x80 + + if err := f.finishWrite(); err != nil { + select { + case <-srv.ctx.Done(): + return + default: + } + + srv.t.Error(err) + } +} + +func (srv *TestServer) readFrame(conn net.Conn) (*framer, error) { + buf := make([]byte, srv.headerSize) + head, err := readHeader(conn, buf) + if err != nil { + return nil, err + } + framer := newFramer(conn, conn, nil, srv.protocol) + + err = framer.readFrame(&head) + if err != nil { + return nil, err + } + + // should be a request frame + if head.version.response() { + return nil, fmt.Errorf("expected to read a request frame got version: %v", head.version) + } else if head.version.version() != srv.protocol { + return nil, fmt.Errorf("expected to read protocol version 0x%x got 0x%x", srv.protocol, head.version.version()) + } + + return framer, nil +} diff --git a/vendor/github.com/gocql/gocql/control_test.go b/vendor/github.com/gocql/gocql/control_test.go new file mode 100644 index 0000000000..a63a8d2eeb --- /dev/null +++ b/vendor/github.com/gocql/gocql/control_test.go @@ -0,0 +1,66 @@ +package gocql + +import ( + "net" + "testing" +) + +func TestHostInfo_Lookup(t *testing.T) { + hostLookupPreferV4 = true + defer func() { hostLookupPreferV4 = false }() + + tests := [...]struct { + addr string + ip net.IP + }{ + {"127.0.0.1", net.IPv4(127, 0, 0, 1)}, + {"localhost", net.IPv4(127, 0, 0, 1)}, // TODO: this may be host dependent + } + + for i, test := range tests { + host, err := hostInfo(test.addr, 1) + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + + if !host.ConnectAddress().Equal(test.ip) { + t.Errorf("expected ip %v got %v for addr %q", test.ip, host.ConnectAddress(), test.addr) + } + } +} + +func TestParseProtocol(t *testing.T) { + tests := [...]struct { + err error + proto int + }{ + { + err: &protocolError{ + frame: errorFrame{ + code: 0x10, + message: "Invalid or unsupported protocol version (5); the lowest supported version is 3 and the greatest is 4", + }, + }, + proto: 4, + }, + { + err: &protocolError{ + frame: errorFrame{ + frameHeader: frameHeader{ + version: 0x83, + }, + code: 0x10, + message: "Invalid or unsupported protocol version: 5", + }, + }, + proto: 3, + }, + } + + for i, test := range tests { + if proto := parseProtocolFromError(test.err); proto != test.proto { + t.Errorf("%d: expected proto %d got %d", i, test.proto, proto) + } + } +} diff --git a/vendor/github.com/gocql/gocql/errors_test.go b/vendor/github.com/gocql/gocql/errors_test.go new file mode 100644 index 0000000000..b774d8a34f --- /dev/null +++ b/vendor/github.com/gocql/gocql/errors_test.go @@ -0,0 +1,29 @@ +// +build all integration + +package gocql + +import ( + "testing" +) + +func TestErrorsParse(t 
*testing.T) { + session := createSession(t) + defer session.Close() + + if err := createTable(session, `CREATE TABLE gocql_test.errors_parse (id int primary key)`); err != nil { + t.Fatal("create:", err) + } + + if err := createTable(session, `CREATE TABLE gocql_test.errors_parse (id int primary key)`); err == nil { + t.Fatal("Should have gotten already exists error from cassandra server.") + } else { + switch e := err.(type) { + case *RequestErrAlreadyExists: + if e.Table != "errors_parse" { + t.Fatalf("expected error table to be 'errors_parse' but was %q", e.Table) + } + default: + t.Fatalf("expected to get RequestErrAlreadyExists instead got %T", e) + } + } +} diff --git a/vendor/github.com/gocql/gocql/events_ccm_test.go b/vendor/github.com/gocql/gocql/events_ccm_test.go new file mode 100644 index 0000000000..ed54fe8c64 --- /dev/null +++ b/vendor/github.com/gocql/gocql/events_ccm_test.go @@ -0,0 +1,297 @@ +// +build ccm, ignore + +package gocql + +import ( + "log" + "testing" + "time" + + "github.com/gocql/gocql/internal/ccm" +) + +func TestEventDiscovery(t *testing.T) { + t.Skip("FLAKE skipping") + if err := ccm.AllUp(); err != nil { + t.Fatal(err) + } + + session := createSession(t) + defer session.Close() + + status, err := ccm.Status() + if err != nil { + t.Fatal(err) + } + t.Logf("status=%+v\n", status) + + session.pool.mu.RLock() + poolHosts := session.pool.hostConnPools // TODO: replace with session.ring + t.Logf("poolhosts=%+v\n", poolHosts) + // check we discovered all the nodes in the ring + for _, host := range status { + if _, ok := poolHosts[host.Addr]; !ok { + t.Errorf("did not discover %q", host.Addr) + } + } + session.pool.mu.RUnlock() + if t.Failed() { + t.FailNow() + } +} + +func TestEventNodeDownControl(t *testing.T) { + t.Skip("FLAKE skipping") + const targetNode = "node1" + if err := ccm.AllUp(); err != nil { + t.Fatal(err) + } + + status, err := ccm.Status() + if err != nil { + t.Fatal(err) + } + + cluster := createCluster() + cluster.Hosts = []string{status[targetNode].Addr} + session := createSessionFromCluster(cluster, t) + defer session.Close() + + t.Log("marking " + targetNode + " as down") + if err := ccm.NodeDown(targetNode); err != nil { + t.Fatal(err) + } + + t.Logf("status=%+v\n", status) + t.Logf("marking node %q down: %v\n", targetNode, status[targetNode]) + + time.Sleep(5 * time.Second) + + session.pool.mu.RLock() + + poolHosts := session.pool.hostConnPools + node := status[targetNode] + t.Logf("poolhosts=%+v\n", poolHosts) + + if _, ok := poolHosts[node.Addr]; ok { + session.pool.mu.RUnlock() + t.Fatal("node not removed after remove event") + } + session.pool.mu.RUnlock() + + host := session.ring.getHost(node.Addr) + if host == nil { + t.Fatal("node not in metadata ring") + } else if host.IsUp() { + t.Fatalf("node not marked as down after event in metadata: %v", host) + } +} + +func TestEventNodeDown(t *testing.T) { + t.Skip("FLAKE skipping") + const targetNode = "node3" + if err := ccm.AllUp(); err != nil { + t.Fatal(err) + } + + session := createSession(t) + defer session.Close() + + if err := ccm.NodeDown(targetNode); err != nil { + t.Fatal(err) + } + + status, err := ccm.Status() + if err != nil { + t.Fatal(err) + } + t.Logf("status=%+v\n", status) + t.Logf("marking node %q down: %v\n", targetNode, status[targetNode]) + + time.Sleep(5 * time.Second) + + session.pool.mu.RLock() + defer session.pool.mu.RUnlock() + + poolHosts := session.pool.hostConnPools + node := status[targetNode] + t.Logf("poolhosts=%+v\n", poolHosts) + + if _, ok := 
poolHosts[node.Addr]; ok { + t.Fatal("node not removed after remove event") + } + + host := session.ring.getHost(node.Addr) + if host == nil { + t.Fatal("node not in metadata ring") + } else if host.IsUp() { + t.Fatalf("node not marked as down after event in metadata: %v", host) + } +} + +func TestEventNodeUp(t *testing.T) { + t.Skip("FLAKE skipping") + if err := ccm.AllUp(); err != nil { + t.Fatal(err) + } + + status, err := ccm.Status() + if err != nil { + t.Fatal(err) + } + log.Printf("status=%+v\n", status) + + session := createSession(t) + defer session.Close() + + const targetNode = "node2" + node := status[targetNode] + + _, ok := session.pool.getPool(node.Addr) + if !ok { + session.pool.mu.RLock() + t.Errorf("target pool not in connection pool: addr=%q pools=%v", status[targetNode].Addr, session.pool.hostConnPools) + session.pool.mu.RUnlock() + t.FailNow() + } + + if err := ccm.NodeDown(targetNode); err != nil { + t.Fatal(err) + } + + time.Sleep(5 * time.Second) + + _, ok = session.pool.getPool(node.Addr) + if ok { + t.Fatal("node not removed after remove event") + } + + if err := ccm.NodeUp(targetNode); err != nil { + t.Fatal(err) + } + + // cassandra < 2.2 needs 10 seconds to start up the binary service + time.Sleep(15 * time.Second) + + _, ok = session.pool.getPool(node.Addr) + if !ok { + t.Fatal("node not added after node added event") + } + + host := session.ring.getHost(node.Addr) + if host == nil { + t.Fatal("node not in metadata ring") + } else if !host.IsUp() { + t.Fatalf("node not marked as UP after event in metadata: addr=%q host=%p: %v", node.Addr, host, host) + } +} + +func TestEventFilter(t *testing.T) { + t.Skip("FLAKE skipping") + if err := ccm.AllUp(); err != nil { + t.Fatal(err) + } + + status, err := ccm.Status() + if err != nil { + t.Fatal(err) + } + log.Printf("status=%+v\n", status) + + cluster := createCluster() + cluster.HostFilter = WhiteListHostFilter(status["node1"].Addr) + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if _, ok := session.pool.getPool(status["node1"].Addr); !ok { + t.Errorf("should have %v in pool but don't", "node1") + } + + for _, host := range [...]string{"node2", "node3"} { + _, ok := session.pool.getPool(status[host].Addr) + if ok { + t.Errorf("should not have %v in pool", host) + } + } + + if t.Failed() { + t.FailNow() + } + + if err := ccm.NodeDown("node2"); err != nil { + t.Fatal(err) + } + + time.Sleep(5 * time.Second) + + if err := ccm.NodeUp("node2"); err != nil { + t.Fatal(err) + } + + time.Sleep(15 * time.Second) + for _, host := range [...]string{"node2", "node3"} { + _, ok := session.pool.getPool(status[host].Addr) + if ok { + t.Errorf("should not have %v in pool", host) + } + } + + if t.Failed() { + t.FailNow() + } + +} + +func TestEventDownQueryable(t *testing.T) { + t.Skip("FLAKE skipping") + if err := ccm.AllUp(); err != nil { + t.Fatal(err) + } + + status, err := ccm.Status() + if err != nil { + t.Fatal(err) + } + log.Printf("status=%+v\n", status) + + const targetNode = "node1" + + addr := status[targetNode].Addr + + cluster := createCluster() + cluster.Hosts = []string{addr} + cluster.HostFilter = WhiteListHostFilter(addr) + session := createSessionFromCluster(cluster, t) + defer session.Close() + + if pool, ok := session.pool.getPool(addr); !ok { + t.Fatalf("should have %v in pool but don't", addr) + } else if !pool.host.IsUp() { + t.Fatalf("host is not up %v", pool.host) + } + + if err := ccm.NodeDown(targetNode); err != nil { + t.Fatal(err) + } + + time.Sleep(5 * time.Second) + + if err 
:= ccm.NodeUp(targetNode); err != nil { + t.Fatal(err) + } + + time.Sleep(15 * time.Second) + + if pool, ok := session.pool.getPool(addr); !ok { + t.Fatalf("should have %v in pool but don't", addr) + } else if !pool.host.IsUp() { + t.Fatalf("host is not up %v", pool.host) + } + + var rows int + if err := session.Query("SELECT COUNT(*) FROM system.local").Scan(&rows); err != nil { + t.Fatal(err) + } else if rows != 1 { + t.Fatalf("expected to get 1 row got %d", rows) + } +} diff --git a/vendor/github.com/gocql/gocql/events_test.go b/vendor/github.com/gocql/gocql/events_test.go new file mode 100644 index 0000000000..c56bc07d4b --- /dev/null +++ b/vendor/github.com/gocql/gocql/events_test.go @@ -0,0 +1,33 @@ +package gocql + +import ( + "net" + "sync" + "testing" +) + +func TestEventDebounce(t *testing.T) { + const eventCount = 150 + wg := &sync.WaitGroup{} + wg.Add(1) + + eventsSeen := 0 + debouncer := newEventDebouncer("testDebouncer", func(events []frame) { + defer wg.Done() + eventsSeen += len(events) + }) + defer debouncer.stop() + + for i := 0; i < eventCount; i++ { + debouncer.debounce(&statusChangeEventFrame{ + change: "UP", + host: net.IPv4(127, 0, 0, 1), + port: 9042, + }) + } + + wg.Wait() + if eventCount != eventsSeen { + t.Fatalf("expected to see %d events but got %d", eventCount, eventsSeen) + } +} diff --git a/vendor/github.com/gocql/gocql/filters_test.go b/vendor/github.com/gocql/gocql/filters_test.go new file mode 100644 index 0000000000..1ccf1a1cac --- /dev/null +++ b/vendor/github.com/gocql/gocql/filters_test.go @@ -0,0 +1,93 @@ +package gocql + +import ( + "net" + "testing" +) + +func TestFilter_WhiteList(t *testing.T) { + f := WhiteListHostFilter("127.0.0.1", "127.0.0.2") + tests := [...]struct { + addr net.IP + accept bool + }{ + {net.ParseIP("127.0.0.1"), true}, + {net.ParseIP("127.0.0.2"), true}, + {net.ParseIP("127.0.0.3"), false}, + } + + for i, test := range tests { + if f.Accept(&HostInfo{connectAddress: test.addr}) { + if !test.accept { + t.Errorf("%d: should not have been accepted but was", i) + } + } else if test.accept { + t.Errorf("%d: should have been accepted but wasn't", i) + } + } +} + +func TestFilter_AllowAll(t *testing.T) { + f := AcceptAllFilter() + tests := [...]struct { + addr net.IP + accept bool + }{ + {net.ParseIP("127.0.0.1"), true}, + {net.ParseIP("127.0.0.2"), true}, + {net.ParseIP("127.0.0.3"), true}, + } + + for i, test := range tests { + if f.Accept(&HostInfo{connectAddress: test.addr}) { + if !test.accept { + t.Errorf("%d: should not have been accepted but was", i) + } + } else if test.accept { + t.Errorf("%d: should have been accepted but wasn't", i) + } + } +} + +func TestFilter_DenyAll(t *testing.T) { + f := DenyAllFilter() + tests := [...]struct { + addr net.IP + accept bool + }{ + {net.ParseIP("127.0.0.1"), false}, + {net.ParseIP("127.0.0.2"), false}, + {net.ParseIP("127.0.0.3"), false}, + } + + for i, test := range tests { + if f.Accept(&HostInfo{connectAddress: test.addr}) { + if !test.accept { + t.Errorf("%d: should not have been accepted but was", i) + } + } else if test.accept { + t.Errorf("%d: should have been accepted but wasn't", i) + } + } +} + +func TestFilter_DataCentre(t *testing.T) { + f := DataCentreHostFilter("dc1") + tests := [...]struct { + dc string + accept bool + }{ + {"dc1", true}, + {"dc2", false}, + } + + for i, test := range tests { + if f.Accept(&HostInfo{dataCenter: test.dc}) { + if !test.accept { + t.Errorf("%d: should not have been accepted but was", i) + } + } else if test.accept { + t.Errorf("%d: should 
have been accepted but wasn't", i) + } + } +} diff --git a/vendor/github.com/gocql/gocql/frame_test.go b/vendor/github.com/gocql/gocql/frame_test.go new file mode 100644 index 0000000000..701c04184c --- /dev/null +++ b/vendor/github.com/gocql/gocql/frame_test.go @@ -0,0 +1,106 @@ +package gocql + +import ( + "bytes" + "testing" +) + +func TestFuzzBugs(t *testing.T) { + // these inputs are found using go-fuzz (https://github.com/dvyukov/go-fuzz) + // and should cause a panic unless fixed. + tests := [][]byte{ + []byte("00000\xa0000"), + []byte("\x8000\x0e\x00\x00\x00\x000"), + []byte("\x8000\x00\x00\x00\x00\t0000000000"), + []byte("\xa0\xff\x01\xae\xefqE\xf2\x1a"), + []byte("\x8200\b\x00\x00\x00c\x00\x00\x00\x02000\x01\x00\x00\x00\x03" + + "\x00\n0000000000\x00\x14000000" + + "00000000000000\x00\x020000" + + "\x00\a000000000\x00\x050000000" + + "\xff0000000000000000000" + + "0000000"), + []byte("\x82\xe600\x00\x00\x00\x000"), + []byte("\x8200\b\x00\x00\x00\b0\x00\x00\x00\x040000"), + []byte("\x8200\x00\x00\x00\x00\x100\x00\x00\x12\x00\x00\x0000000" + + "00000"), + []byte("\x83000\b\x00\x00\x00\x14\x00\x00\x00\x020000000" + + "000000000"), + []byte("\x83000\b\x00\x00\x000\x00\x00\x00\x04\x00\x1000000" + + "00000000000000e00000" + + "000\x800000000000000000" + + "0000000000000"), + } + + for i, test := range tests { + t.Logf("test %d input: %q", i, test) + + var bw bytes.Buffer + + r := bytes.NewReader(test) + head, err := readHeader(r, make([]byte, 9)) + if err != nil { + continue + } + + framer := newFramer(r, &bw, nil, byte(head.version)) + err = framer.readFrame(&head) + if err != nil { + continue + } + + frame, err := framer.parseFrame() + if err != nil { + continue + } + + t.Errorf("(%d) expected to fail for input % X", i, test) + t.Errorf("(%d) frame=%+#v", i, frame) + } +} + +func TestFrameWriteTooLong(t *testing.T) { + w := &bytes.Buffer{} + framer := newFramer(nil, w, nil, 2) + + framer.writeHeader(0, opStartup, 1) + framer.writeBytes(make([]byte, maxFrameSize+1)) + err := framer.finishWrite() + if err != ErrFrameTooBig { + t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err) + } +} + +func TestFrameReadTooLong(t *testing.T) { + r := &bytes.Buffer{} + r.Write(make([]byte, maxFrameSize+1)) + // write a new header right after this frame to verify that we can read it + r.Write([]byte{0x02, 0x00, 0x00, byte(opReady), 0x00, 0x00, 0x00, 0x00}) + + framer := newFramer(r, nil, nil, 2) + + head := frameHeader{ + version: 2, + op: opReady, + length: r.Len() - 8, + } + + err := framer.readFrame(&head) + if err != ErrFrameTooBig { + t.Fatalf("expected to get %v got %v", ErrFrameTooBig, err) + } + + head, err = readHeader(r, make([]byte, 8)) + if err != nil { + t.Fatal(err) + } + if head.op != opReady { + t.Fatalf("expected to get header %v got %v", opReady, head.op) + } +} + +func TestParseConsistencyErrorInsteadOfPanic(t *testing.T) { + _, err := ParseConsistencyWrapper("TEST") + if err == nil { + t.Fatal("expected ParseConsistencyWrapper error got nil") + } +} diff --git a/vendor/github.com/gocql/gocql/framer_bench_test.go b/vendor/github.com/gocql/gocql/framer_bench_test.go new file mode 100644 index 0000000000..06dfac45e8 --- /dev/null +++ b/vendor/github.com/gocql/gocql/framer_bench_test.go @@ -0,0 +1,48 @@ +package gocql + +import ( + "compress/gzip" + "io/ioutil" + "os" + "testing" +) + +func readGzipData(path string) ([]byte, error) { + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + r, err := gzip.NewReader(f) + if err != nil { + return nil, 
err + } + defer r.Close() + + return ioutil.ReadAll(r) +} + +func BenchmarkParseRowsFrame(b *testing.B) { + data, err := readGzipData("testdata/frames/bench_parse_result.gz") + if err != nil { + b.Fatal(err) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + framer := &framer{ + header: &frameHeader{ + version: protoVersion4 | 0x80, + op: opResult, + length: len(data), + }, + rbuf: data, + } + + _, err = framer.parseFrame() + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/gocql/gocql/host_source_test.go b/vendor/github.com/gocql/gocql/host_source_test.go new file mode 100644 index 0000000000..790453ad2f --- /dev/null +++ b/vendor/github.com/gocql/gocql/host_source_test.go @@ -0,0 +1,137 @@ +// +build all integration + +package gocql + +import ( + "fmt" + "net" + "testing" +) + +func TestUnmarshalCassVersion(t *testing.T) { + tests := [...]struct { + data string + version cassVersion + }{ + {"3.2", cassVersion{3, 2, 0}}, + {"2.10.1-SNAPSHOT", cassVersion{2, 10, 1}}, + {"1.2.3", cassVersion{1, 2, 3}}, + } + + for i, test := range tests { + v := &cassVersion{} + if err := v.UnmarshalCQL(nil, []byte(test.data)); err != nil { + t.Errorf("%d: %v", i, err) + } else if *v != test.version { + t.Errorf("%d: expected %#+v got %#+v", i, test.version, *v) + } + } +} + +func TestCassVersionBefore(t *testing.T) { + tests := [...]struct { + version cassVersion + major, minor, patch int + }{ + {cassVersion{1, 0, 0}, 0, 0, 0}, + {cassVersion{0, 1, 0}, 0, 0, 0}, + {cassVersion{0, 0, 1}, 0, 0, 0}, + + {cassVersion{1, 0, 0}, 0, 1, 0}, + {cassVersion{0, 1, 0}, 0, 0, 1}, + } + + for i, test := range tests { + if !test.version.Before(test.major, test.minor, test.patch) { + t.Errorf("%d: expected v%d.%d.%d to be before %v", i, test.major, test.minor, test.patch, test.version) + } + } + +} + +func TestIsValidPeer(t *testing.T) { + ring := ringDescriber{} + host := &HostInfo{ + rpcAddress: net.ParseIP("0.0.0.0"), + rack: "myRack", + hostId: "0", + dataCenter: "datacenter", + tokens: []string{"0", "1"}, + } + + if !ring.IsValidPeer(host) { + t.Errorf("expected %+v to be a valid peer", host) + } + + host.rack = "" + if ring.IsValidPeer(host) { + t.Errorf("expected %+v to NOT be a valid peer", host) + } +} + +func TestGetHosts(t *testing.T) { + cluster := createCluster() + session := createSessionFromCluster(cluster, t) + + hosts, partitioner, err := session.hostSource.GetHosts() + + assertTrue(t, "err == nil", err == nil) + assertTrue(t, "len(hosts) == 3", len(hosts) == 3) + assertTrue(t, "len(partitioner) != 0", len(partitioner) != 0) + +} + +func TestGetHostsWithFilter(t *testing.T) { + filterHostIP := net.ParseIP("127.0.0.3") + cluster := createCluster() + + // Filter to remove one of the localhost nodes + cluster.HostFilter = HostFilterFunc(func(host *HostInfo) bool { + if host.ConnectAddress().Equal(filterHostIP) { + return false + } + return true + }) + session := createSessionFromCluster(cluster, t) + + hosts, partitioner, err := session.hostSource.GetHosts() + assertTrue(t, "err == nil", err == nil) + assertTrue(t, "len(hosts) == 2", len(hosts) == 2) + assertTrue(t, "len(partitioner) != 0", len(partitioner) != 0) + for _, host := range hosts { + if host.ConnectAddress().Equal(filterHostIP) { + t.Fatal(fmt.Sprintf("Did not expect to see '%q' in host list", filterHostIP)) + } + } +} + +func TestHostInfo_ConnectAddress(t *testing.T) { + var localhost = net.IPv4(127, 0, 0, 1) + tests := []struct { + name string + connectAddr net.IP + rpcAddr net.IP + broadcastAddr net.IP + peer net.IP 
+ }{ + {name: "rpc_address", rpcAddr: localhost}, + {name: "connect_address", connectAddr: localhost}, + {name: "broadcast_address", broadcastAddr: localhost}, + {name: "peer", peer: localhost}, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + host := &HostInfo{ + connectAddress: test.connectAddr, + rpcAddress: test.rpcAddr, + broadcastAddress: test.broadcastAddr, + peer: test.peer, + } + + if addr := host.ConnectAddress(); !addr.Equal(localhost) { + t.Fatalf("expected ConnectAddress to be %s got %s", localhost, addr) + } + }) + } +} diff --git a/vendor/github.com/gocql/gocql/internal/lru/lru.go b/vendor/github.com/gocql/gocql/internal/lru/lru.go index 9f5719b346..14ca1f4332 100644 --- a/vendor/github.com/gocql/gocql/internal/lru/lru.go +++ b/vendor/github.com/gocql/gocql/internal/lru/lru.go @@ -30,7 +30,7 @@ type Cache struct { // an item is evicted. Zero means no limit. MaxEntries int - // OnEvicted optionally specificies a callback function to be + // OnEvicted optionally specifies a callback function to be // executed when an entry is purged from the cache. OnEvicted func(key string, value interface{}) diff --git a/vendor/github.com/gocql/gocql/internal/lru/lru_test.go b/vendor/github.com/gocql/gocql/internal/lru/lru_test.go new file mode 100644 index 0000000000..1a6414b336 --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/lru/lru_test.go @@ -0,0 +1,72 @@ +/* +Copyright 2015 To gocql authors +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lru + +import ( + "testing" +) + +type simpleStruct struct { + int + string +} + +type complexStruct struct { + int + simpleStruct +} + +var getTests = []struct { + name string + keyToAdd string + keyToGet string + expectedOk bool +}{ + {"string_hit", "mystring", "mystring", true}, + {"string_miss", "mystring", "nonsense", false}, + {"simple_struct_hit", "two", "two", true}, + {"simple_struct_miss", "two", "noway", false}, +} + +func TestGet(t *testing.T) { + for _, tt := range getTests { + lru := New(0) + lru.Add(tt.keyToAdd, 1234) + val, ok := lru.Get(tt.keyToGet) + if ok != tt.expectedOk { + t.Fatalf("%s: cache hit = %v; want %v", tt.name, ok, !ok) + } else if ok && val != 1234 { + t.Fatalf("%s expected get to return 1234 but got %v", tt.name, val) + } + } +} + +func TestRemove(t *testing.T) { + lru := New(0) + lru.Add("mystring", 1234) + if val, ok := lru.Get("mystring"); !ok { + t.Fatal("TestRemove returned no match") + } else if val != 1234 { + t.Fatalf("TestRemove failed.
Expected %d, got %v", 1234, val) + } + + lru.Remove("mystring") + if _, ok := lru.Get("mystring"); ok { + t.Fatal("TestRemove returned a removed entry") + } +} diff --git a/vendor/github.com/gocql/gocql/internal/murmur/murmur_test.go b/vendor/github.com/gocql/gocql/internal/murmur/murmur_test.go new file mode 100644 index 0000000000..0f242d17df --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/murmur/murmur_test.go @@ -0,0 +1,74 @@ +package murmur + +import ( + "strconv" + "testing" +) + +// Test the implementation of murmur3 +func TestMurmur3H1(t *testing.T) { + // these examples are based on adding an index number to a sample string in + // a loop. The expected values were generated by the java datastax murmur3 + // implementation. The number of examples here of increasing lengths ensures + // test coverage of all tail-length branches in the murmur3 algorithm + seriesExpected := [...]uint64{ + 0x0000000000000000, // "" + 0x2ac9debed546a380, // "0" + 0x649e4eaa7fc1708e, // "01" + 0xce68f60d7c353bdb, // "012" + 0x0f95757ce7f38254, // "0123" + 0x0f04e459497f3fc1, // "01234" + 0x88c0a92586be0a27, // "012345" + 0x13eb9fb82606f7a6, // "0123456" + 0x8236039b7387354d, // "01234567" + 0x4c1e87519fe738ba, // "012345678" + 0x3f9652ac3effeb24, // "0123456789" + 0x3f33760ded9006c6, // "01234567890" + 0xaed70a6631854cb1, // "012345678901" + 0x8a299a8f8e0e2da7, // "0123456789012" + 0x624b675c779249a6, // "01234567890123" + 0xa4b203bb1d90b9a3, // "012345678901234" + 0xa3293ad698ecb99a, // "0123456789012345" + 0xbc740023dbd50048, // "01234567890123456" + 0x3fe5ab9837d25cdd, // "012345678901234567" + 0x2d0338c1ca87d132, // "0123456789012345678" + } + sample := "" + for i, expected := range seriesExpected { + assertMurmur3H1(t, []byte(sample), expected) + + sample = sample + strconv.Itoa(i%10) + } + + // Here are some test examples from other driver implementations + assertMurmur3H1(t, []byte("hello"), 0xcbd8a7b341bd9b02) + assertMurmur3H1(t, []byte("hello, world"), 0x342fac623a5ebc8e) + assertMurmur3H1(t, []byte("19 Jan 2038 at 3:14:07 AM"), 0xb89e5988b737affc) + assertMurmur3H1(t, []byte("The quick brown fox jumps over the lazy dog."), 0xcd99481f9ee902c9) +} + +// helper function for testing the murmur3 implementation +func assertMurmur3H1(t *testing.T, data []byte, expected uint64) { + actual := Murmur3H1(data) + if actual != expected { + t.Errorf("Expected h1 = %x for data = %x, but was %x", expected, data, actual) + } +} + +// Benchmark of the performance of the murmur3 implementation +func BenchmarkMurmur3H1(b *testing.B) { + data := make([]byte, 1024) + for i := 0; i < 1024; i++ { + data[i] = byte(i) + } + + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + h1 := Murmur3H1(data) + if h1 != 7627370222079200297 { + b.Fatalf("expected %d got %d", 7627370222079200297, h1) + } + } + }) +} diff --git a/vendor/github.com/gocql/gocql/internal/streams/streams_test.go b/vendor/github.com/gocql/gocql/internal/streams/streams_test.go new file mode 100644 index 0000000000..dbd58d2014 --- /dev/null +++ b/vendor/github.com/gocql/gocql/internal/streams/streams_test.go @@ -0,0 +1,201 @@ +package streams + +import ( + "math" + "strconv" + "sync/atomic" + "testing" +) + +func TestUsesAllStreams(t *testing.T) { + streams := New(1) + + got := make(map[int]struct{}) + + for i := 1; i < streams.NumStreams; i++ { + stream, ok := streams.GetStream() + if !ok { + t.Fatalf("unable to get stream %d", i) + } + + if _, ok = got[stream]; ok { + t.Fatalf("got an already allocated stream: %d", stream)
+ } + got[stream] = struct{}{} + + if !streams.isSet(stream) { + bucket := atomic.LoadUint64(&streams.streams[bucketOffset(stream)]) + t.Logf("bucket=%d: %s\n", bucket, strconv.FormatUint(bucket, 2)) + t.Fatalf("stream not set: %d", stream) + } + } + + for i := 1; i < streams.NumStreams; i++ { + if _, ok := got[i]; !ok { + t.Errorf("did not use stream %d", i) + } + } + if _, ok := got[0]; ok { + t.Fatal("expected to not use stream 0") + } + + for i, bucket := range streams.streams { + if bucket != math.MaxUint64 { + t.Errorf("did not use all streams in offset=%d bucket=%s", i, bitfmt(bucket)) + } + } +} + +func TestFullStreams(t *testing.T) { + streams := New(1) + for i := range streams.streams { + streams.streams[i] = math.MaxUint64 + } + + stream, ok := streams.GetStream() + if ok { + t.Fatalf("should not get stream when all in use: stream=%d", stream) + } +} + +func TestClearStreams(t *testing.T) { + streams := New(1) + for i := range streams.streams { + streams.streams[i] = math.MaxUint64 + } + streams.inuseStreams = int32(streams.NumStreams) + + for i := 0; i < streams.NumStreams; i++ { + streams.Clear(i) + } + + for i, bucket := range streams.streams { + if bucket != 0 { + t.Errorf("did not clear streams in offset=%d bucket=%s", i, bitfmt(bucket)) + } + } +} + +func TestDoubleClear(t *testing.T) { + streams := New(1) + stream, ok := streams.GetStream() + if !ok { + t.Fatal("did not get stream") + } + + if !streams.Clear(stream) { + t.Fatalf("stream not indicated as in use: %d", stream) + } + if streams.Clear(stream) { + t.Fatalf("stream still marked as in use after clear: %d", stream) + } +} + +func BenchmarkConcurrentUse(b *testing.B) { + streams := New(2) + + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + stream, ok := streams.GetStream() + if !ok { + b.Error("unable to get stream") + return + } + + if !streams.Clear(stream) { + b.Errorf("stream was already cleared: %d", stream) + return + } + } + }) +} + +func TestStreamOffset(t *testing.T) { + tests := [...]struct { + n int + off uint64 + }{ + {0, 63}, + {1, 62}, + {2, 61}, + {3, 60}, + {63, 0}, + {64, 63}, + + {128, 63}, + } + + for _, test := range tests { + if off := streamOffset(test.n); off != test.off { + t.Errorf("n=%d expected %d got %d", test.n, test.off, off) + } + } +} + +func TestIsSet(t *testing.T) { + tests := [...]struct { + stream int + bucket uint64 + set bool + }{ + {0, 0, false}, + {0, 1 << 63, true}, + {1, 0, false}, + {1, 1 << 62, true}, + {63, 1, true}, + {64, 1 << 63, true}, + {0, 0x8000000000000000, true}, + } + + for i, test := range tests { + if set := isSet(test.bucket, test.stream); set != test.set { + t.Errorf("[%d] stream=%d expected %v got %v", i, test.stream, test.set, set) + } + } + + for i := 0; i < bucketBits; i++ { + if !isSet(math.MaxUint64, i) { + var shift uint64 = math.MaxUint64 >> streamOffset(i) + t.Errorf("expected isSet to be true for i=%d (shifted bucket=%d)", i, shift) + } + } +} + +func TestBucketOffset(t *testing.T) { + tests := [...]struct { + n int + bucket int + }{ + {0, 0}, + {1, 0}, + {63, 0}, + {64, 1}, + } + + for _, test := range tests { + if bucket := bucketOffset(test.n); bucket != test.bucket { + t.Errorf("n=%d expected %v got %v", test.n, test.bucket, bucket) + } + } +} + +func TestStreamFromBucket(t *testing.T) { + tests := [...]struct { + bucket int + pos int + stream int + }{ + {0, 0, 0}, + {0, 1, 1}, + {0, 2, 2}, + {0, 63, 63}, + {1, 0, 64}, + {1, 1, 65}, + } + + for _, test := range tests { + if stream := streamFromBucket(test.bucket, test.pos); stream != test.stream { +
t.Errorf("bucket=%d pos=%d expected %v got %v", test.bucket, test.pos, test.stream, stream) + } + } +} diff --git a/vendor/github.com/gocql/gocql/marshal_test.go b/vendor/github.com/gocql/gocql/marshal_test.go new file mode 100644 index 0000000000..e273ef6f44 --- /dev/null +++ b/vendor/github.com/gocql/gocql/marshal_test.go @@ -0,0 +1,1403 @@ +// +build all unit + +package gocql + +import ( + "bytes" + "math" + "math/big" + "net" + "reflect" + "strings" + "testing" + "time" + + "gopkg.in/inf.v0" +) + +type AliasInt int + +var marshalTests = []struct { + Info TypeInfo + Data []byte + Value interface{} + MarshalError error + UnmarshalError error +}{ + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte("hello world"), + []byte("hello world"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte("hello world"), + "hello world", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte(nil), + []byte(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte("hello world"), + MyString("hello world"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte("HELLO WORLD"), + CustomString("hello world"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBlob}, + []byte("hello\x00"), + []byte("hello\x00"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBlob}, + []byte(nil), + []byte(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimeUUID}, + []byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0}, + func() UUID { + x, _ := UUIDFromBytes([]byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0}) + return x + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimeUUID}, + []byte{0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0}, + []byte{0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0}, + MarshalError("can not marshal []byte 6 bytes long into timeuuid, must be exactly 16 bytes long"), + UnmarshalError("Unable to parse UUID: UUIDs must be exactly 16 bytes long"), + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x00\x00\x00\x00"), + 0, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x01\x02\x03\x04"), + int(16909060), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x01\x02\x03\x04"), + AliasInt(16909060), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x80\x00\x00\x00"), + int32(math.MinInt32), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x7f\xff\xff\xff"), + int32(math.MaxInt32), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x00\x00\x00\x00"), + "0", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x01\x02\x03\x04"), + "16909060", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x80\x00\x00\x00"), + "-2147483648", // math.MinInt32 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x7f\xff\xff\xff"), + "2147483647", // math.MaxInt32 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x00\x00\x00\x00\x00\x00\x00\x00"), + 0, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x01\x02\x03\x04\x05\x06\x07\x08"), + 72623859790382856, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x80\x00\x00\x00\x00\x00\x00\x00"), + int64(math.MinInt64), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x7f\xff\xff\xff\xff\xff\xff\xff"), + 
int64(math.MaxInt64), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x00\x00\x00\x00\x00\x00\x00\x00"), + "0", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x01\x02\x03\x04\x05\x06\x07\x08"), + "72623859790382856", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x80\x00\x00\x00\x00\x00\x00\x00"), + "-9223372036854775808", // math.MinInt64 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\x7f\xff\xff\xff\xff\xff\xff\xff"), + "9223372036854775807", // math.MaxInt64 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBoolean}, + []byte("\x00"), + false, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBoolean}, + []byte("\x01"), + true, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeFloat}, + []byte("\x40\x49\x0f\xdb"), + float32(3.14159265), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDouble}, + []byte("\x40\x09\x21\xfb\x53\xc8\xd4\xf1"), + float64(3.14159265), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x00\x00"), + inf.NewDec(0, 0), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x00\x64"), + inf.NewDec(100, 0), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x02\x19"), + decimalize("0.25"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x13\xD5\a;\x20\x14\xA2\x91"), + decimalize("-0.0012095473475870063"), // From the iconara/cql-rb test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x13*\xF8\xC4\xDF\xEB]o"), + decimalize("0.0012095473475870063"), // From the iconara/cql-rb test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x12\xF2\xD8\x02\xB6R\x7F\x99\xEE\x98#\x99\xA9V"), + decimalize("-1042342234234.123423435647768234"), // From the iconara/cql-rb test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\r\nJ\x04\"^\x91\x04\x8a\xb1\x18\xfe"), + decimalize("1243878957943.1234124191998"), // From the datastax/python-driver test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x06\xe5\xde]\x98Y"), + decimalize("-112233.441191"), // From the datastax/python-driver test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x14\x00\xfa\xce"), + decimalize("0.00000000000000064206"), // From the datastax/python-driver test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\x00\x00\x00\x14\xff\x052"), + decimalize("-0.00000000000000064206"), // From the datastax/python-driver test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDecimal}, + []byte("\xff\xff\xff\x9c\x00\xfa\xce"), + inf.NewDec(64206, -100), // From the datastax/python-driver test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"), + time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"), + int64(1376387523000), + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeList}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"), + []int{1, 2}, + nil, + nil, + }, + { + CollectionType{ + NativeType: 
NativeType{proto: 2, typ: TypeList}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"), + [2]int{1, 2}, + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeSet}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"), + []int{1, 2}, + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeSet}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte{0, 0}, // encoding of a list should always include the size of the collection + []int{}, + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeMap}, + Key: NativeType{proto: 2, typ: TypeVarchar}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte("\x00\x01\x00\x03foo\x00\x04\x00\x00\x00\x01"), + map[string]int{"foo": 1}, + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeMap}, + Key: NativeType{proto: 2, typ: TypeVarchar}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte{0, 0}, + map[string]int{}, + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeList}, + Elem: NativeType{proto: 2, typ: TypeVarchar}, + }, + bytes.Join([][]byte{ + []byte("\x00\x01\xFF\xFF"), + bytes.Repeat([]byte("X"), 65535)}, []byte("")), + []string{strings.Repeat("X", 65535)}, + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeMap}, + Key: NativeType{proto: 2, typ: TypeVarchar}, + Elem: NativeType{proto: 2, typ: TypeVarchar}, + }, + bytes.Join([][]byte{ + []byte("\x00\x01\xFF\xFF"), + bytes.Repeat([]byte("X"), 65535), + []byte("\xFF\xFF"), + bytes.Repeat([]byte("Y"), 65535)}, []byte("")), + map[string]string{ + strings.Repeat("X", 65535): strings.Repeat("Y", 65535), + }, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte("\x00"), + 0, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte("\x37\xE2\x3C\xEC"), + int32(937573612), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte("\x37\xE2\x3C\xEC"), + big.NewInt(937573612), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte("\x03\x9EV \x15\f\x03\x9DK\x18\xCDI\\$?\a["), + bigintize("1231312312331283012830129382342342412123"), // From the iconara/cql-rb test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte("\xC9v\x8D:\x86"), + big.NewInt(-234234234234), // From the iconara/cql-rb test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte("f\x1e\xfd\xf2\xe3\xb1\x9f|\x04_\x15"), + bigintize("123456789123456789123456789"), // From the datastax/python-driver test suite + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarint}, + []byte(nil), + nil, + nil, + UnmarshalError("can not unmarshal into non-pointer "), + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\x7F\x00\x00\x01"), + net.ParseIP("127.0.0.1").To4(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\xFF\xFF\xFF\xFF"), + net.ParseIP("255.255.255.255").To4(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\x7F\x00\x00\x01"), + "127.0.0.1", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\xFF\xFF\xFF\xFF"), + "255.255.255.255", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\x21\xDA\x00\xd3\x00\x00\x2f\x3b\x02\xaa\x00\xff\xfe\x28\x9c\x5a"), + "21da:d3:0:2f3b:2aa:ff:fe28:9c5a", + nil, + nil, 
+ }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29"), + "fe80::202:b3ff:fe1e:8329", + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\x21\xDA\x00\xd3\x00\x00\x2f\x3b\x02\xaa\x00\xff\xfe\x28\x9c\x5a"), + net.ParseIP("21da:d3:0:2f3b:2aa:ff:fe28:9c5a"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x02\xb3\xff\xfe\x1e\x83\x29"), + net.ParseIP("fe80::202:b3ff:fe1e:8329"), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte(nil), + nil, + nil, + UnmarshalError("can not unmarshal into non-pointer "), + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte("nullable string"), + func() *string { + value := "nullable string" + return &value + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte(nil), + (*string)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\x7f\xff\xff\xff"), + func() *int { + var value int = math.MaxInt32 + return &value + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte(nil), + (*int)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimeUUID}, + []byte{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0}, + &UUID{0x3d, 0xcd, 0x98, 0x0, 0xf3, 0xd9, 0x11, 0xbf, 0x86, 0xd4, 0xb8, 0xe8, 0x56, 0x2c, 0xc, 0xd0}, + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimeUUID}, + []byte(nil), + (*UUID)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"), + func() *time.Time { + t := time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC) + return &t + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTimestamp}, + []byte(nil), + (*time.Time)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBoolean}, + []byte("\x00"), + func() *bool { + b := false + return &b + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBoolean}, + []byte("\x01"), + func() *bool { + b := true + return &b + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBoolean}, + []byte(nil), + (*bool)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeFloat}, + []byte("\x40\x49\x0f\xdb"), + func() *float32 { + f := float32(3.14159265) + return &f + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeFloat}, + []byte(nil), + (*float32)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDouble}, + []byte("\x40\x09\x21\xfb\x53\xc8\xd4\xf1"), + func() *float64 { + d := float64(3.14159265) + return &d + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeDouble}, + []byte(nil), + (*float64)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte("\x7F\x00\x00\x01"), + func() *net.IP { + ip := net.ParseIP("127.0.0.1").To4() + return &ip + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInet}, + []byte(nil), + (*net.IP)(nil), + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeList}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte("\x00\x02\x00\x04\x00\x00\x00\x01\x00\x04\x00\x00\x00\x02"), + func() *[]int { + l := []int{1, 2} + return &l + }(), + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeList}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte(nil), + (*[]int)(nil), + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeMap}, + 
Key: NativeType{proto: 2, typ: TypeVarchar}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte("\x00\x01\x00\x03foo\x00\x04\x00\x00\x00\x01"), + func() *map[string]int { + m := map[string]int{"foo": 1} + return &m + }(), + nil, + nil, + }, + { + CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeMap}, + Key: NativeType{proto: 2, typ: TypeVarchar}, + Elem: NativeType{proto: 2, typ: TypeInt}, + }, + []byte(nil), + (*map[string]int)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte("HELLO WORLD"), + func() *CustomString { + customString := CustomString("hello world") + return &customString + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte(nil), + (*CustomString)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeSmallInt}, + []byte("\x7f\xff"), + 32767, // math.MaxInt16 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeSmallInt}, + []byte("\x7f\xff"), + "32767", // math.MaxInt16 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeSmallInt}, + []byte("\x00\x01"), + int16(1), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeSmallInt}, + []byte("\xff\xff"), + int16(-1), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeSmallInt}, + []byte("\xff\xff"), + uint16(65535), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\x7f"), + 127, // math.MaxInt8 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\x7f"), + "127", // math.MaxInt8 + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\x01"), + int16(1), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\xff"), + int16(-1), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\xff"), + uint8(255), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\xff"), + uint64(255), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\xff"), + uint32(255), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\xff"), + uint16(255), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTinyInt}, + []byte("\xff"), + uint(255), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBigInt}, + []byte("\xff\xff\xff\xff\xff\xff\xff\xff"), + uint64(math.MaxUint64), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeInt}, + []byte("\xff\xff\xff\xff"), + uint32(math.MaxUint32), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeBlob}, + []byte(nil), + ([]byte)(nil), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeVarchar}, + []byte{}, + func() interface{} { + var s string + return &s + }(), + nil, + nil, + }, + { + NativeType{proto: 2, typ: TypeTime}, + encBigInt(1000), + time.Duration(1000), + nil, + nil, + }, +} + +func decimalize(s string) *inf.Dec { + i, _ := new(inf.Dec).SetString(s) + return i +} + +func bigintize(s string) *big.Int { + i, _ := new(big.Int).SetString(s, 10) + return i +} + +func TestMarshal_Encode(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalError == nil { + data, err := Marshal(test.Info, test.Value) + if err != nil { + t.Errorf("marshalTest[%d]: %v", i, err) + continue + } + if !bytes.Equal(data, test.Data) { + t.Errorf("marshalTest[%d]: expected %q, got %q (%#v)", i, test.Data, data, test.Value) + } + } else { + if _, err := Marshal(test.Info, test.Value); err != test.MarshalError { + t.Errorf("marshalTest[%d] (%v=>%T): %#v returned error %#v, want %#v.", i, test.Info, test.Value, test.Value, err,
test.MarshalError) + } + } + } +} + +func TestMarshal_Decode(t *testing.T) { + for i, test := range marshalTests { + if test.UnmarshalError == nil { + v := reflect.New(reflect.TypeOf(test.Value)) + err := Unmarshal(test.Info, test.Data, v.Interface()) + if err != nil { + t.Errorf("unmarshalTest[%d] (%v=>%T): %v", i, test.Info, test.Value, err) + continue + } + if !reflect.DeepEqual(v.Elem().Interface(), test.Value) { + t.Errorf("unmarshalTest[%d] (%v=>%T): expected %#v, got %#v.", i, test.Info, test.Value, test.Value, v.Elem().Interface()) + } + } else { + if err := Unmarshal(test.Info, test.Data, test.Value); err != test.UnmarshalError { + t.Errorf("unmarshalTest[%d] (%v=>%T): %#v returned error %#v, want %#v.", i, test.Info, test.Value, test.Value, err, test.UnmarshalError) + } + } + } +} + +func TestMarshalVarint(t *testing.T) { + varintTests := []struct { + Value interface{} + Marshaled []byte + Unmarshaled *big.Int + }{ + { + Value: int8(0), + Marshaled: []byte("\x00"), + Unmarshaled: big.NewInt(0), + }, + { + Value: uint8(255), + Marshaled: []byte("\x00\xFF"), + Unmarshaled: big.NewInt(255), + }, + { + Value: int8(-1), + Marshaled: []byte("\xFF"), + Unmarshaled: big.NewInt(-1), + }, + { + Value: big.NewInt(math.MaxInt32), + Marshaled: []byte("\x7F\xFF\xFF\xFF"), + Unmarshaled: big.NewInt(math.MaxInt32), + }, + { + Value: big.NewInt(int64(math.MaxInt32) + 1), + Marshaled: []byte("\x00\x80\x00\x00\x00"), + Unmarshaled: big.NewInt(int64(math.MaxInt32) + 1), + }, + { + Value: big.NewInt(math.MinInt32), + Marshaled: []byte("\x80\x00\x00\x00"), + Unmarshaled: big.NewInt(math.MinInt32), + }, + { + Value: big.NewInt(int64(math.MinInt32) - 1), + Marshaled: []byte("\xFF\x7F\xFF\xFF\xFF"), + Unmarshaled: big.NewInt(int64(math.MinInt32) - 1), + }, + { + Value: math.MinInt64, + Marshaled: []byte("\x80\x00\x00\x00\x00\x00\x00\x00"), + Unmarshaled: big.NewInt(math.MinInt64), + }, + { + Value: uint64(math.MaxInt64) + 1, + Marshaled: []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00"), + Unmarshaled: bigintize("9223372036854775808"), + }, + { + Value: bigintize("2361183241434822606848"), // 2**71 + Marshaled: []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00"), + Unmarshaled: bigintize("2361183241434822606848"), + }, + { + Value: bigintize("-9223372036854775809"), // -2**63 - 1 + Marshaled: []byte("\xFF\x7F\xFF\xFF\xFF\xFF\xFF\xFF\xFF"), + Unmarshaled: bigintize("-9223372036854775809"), + }, + } + + for i, test := range varintTests { + data, err := Marshal(NativeType{proto: 2, typ: TypeVarint}, test.Value) + if err != nil { + t.Errorf("error marshaling varint: %v (test #%d)", err, i) + } + + if !bytes.Equal(test.Marshaled, data) { + t.Errorf("marshaled varint mismatch: expected %v, got %v (test #%d)", test.Marshaled, data, i) + } + + binder := new(big.Int) + err = Unmarshal(NativeType{proto: 2, typ: TypeVarint}, test.Marshaled, binder) + if err != nil { + t.Errorf("error unmarshaling varint: %v (test #%d)", err, i) + } + + if test.Unmarshaled.Cmp(binder) != 0 { + t.Errorf("unmarshaled varint mismatch: expected %v, got %v (test #%d)", test.Unmarshaled, binder, i) + } + } + + varintUint64Tests := []struct { + Value interface{} + Marshaled []byte + Unmarshaled uint64 + }{ + { + Value: int8(0), + Marshaled: []byte("\x00"), + Unmarshaled: 0, + }, + { + Value: uint8(255), + Marshaled: []byte("\x00\xFF"), + Unmarshaled: 255, + }, + { + Value: big.NewInt(math.MaxInt32), + Marshaled: []byte("\x7F\xFF\xFF\xFF"), + Unmarshaled: uint64(math.MaxInt32), + }, + { + Value: big.NewInt(int64(math.MaxInt32) + 1), +
Marshaled: []byte("\x00\x80\x00\x00\x00"), + Unmarshaled: uint64(int64(math.MaxInt32) + 1), + }, + { + Value: uint64(math.MaxInt64) + 1, + Marshaled: []byte("\x00\x80\x00\x00\x00\x00\x00\x00\x00"), + Unmarshaled: 9223372036854775808, + }, + { + Value: uint64(math.MaxUint64), + Marshaled: []byte("\x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF"), + Unmarshaled: uint64(math.MaxUint64), + }, + } + + for i, test := range varintUint64Tests { + data, err := Marshal(NativeType{proto: 2, typ: TypeVarint}, test.Value) + if err != nil { + t.Errorf("error marshaling varint: %v (test #%d)", err, i) + } + + if !bytes.Equal(test.Marshaled, data) { + t.Errorf("marshaled varint mismatch: expected %v, got %v (test #%d)", test.Marshaled, data, i) + } + + var binder uint64 + err = Unmarshal(NativeType{proto: 2, typ: TypeVarint}, test.Marshaled, &binder) + if err != nil { + t.Errorf("error unmarshaling varint to uint64: %v (test #%d)", err, i) + } + + if test.Unmarshaled != binder { + t.Errorf("unmarshaled varint mismatch: expected %v, got %v (test #%d)", test.Unmarshaled, binder, i) + } + } +} + +func equalStringSlice(leftList, rightList []string) bool { + if len(leftList) != len(rightList) { + return false + } + for index := range leftList { + if rightList[index] != leftList[index] { + return false + } + } + return true +} + +func TestMarshalList(t *testing.T) { + typeInfo := CollectionType{ + NativeType: NativeType{proto: 2, typ: TypeList}, + Elem: NativeType{proto: 2, typ: TypeVarchar}, + } + + sourceLists := [][]string{ + {"valueA"}, + {"valueA", "valueB"}, + {"valueB"}, + } + + listDatas := [][]byte{} + + for _, list := range sourceLists { + listData, marshalErr := Marshal(typeInfo, list) + if nil != marshalErr { + t.Errorf("Error marshaling %+v of type %+v: %s", list, typeInfo, marshalErr) + } + listDatas = append(listDatas, listData) + } + + outputLists := [][]string{} + + var outputList []string + + for _, listData := range listDatas { + if unmarshalErr := Unmarshal(typeInfo, listData, &outputList); nil != unmarshalErr { + t.Error(unmarshalErr) + } + outputLists = append(outputLists, outputList) + } + + for index, sourceList := range sourceLists { + outputList := outputLists[index] + if !equalStringSlice(sourceList, outputList) { + t.Errorf("Expected list %+v to equal list %+v, but it did not", sourceList, outputList) + } + } +} + +type CustomString string + +func (c CustomString) MarshalCQL(info TypeInfo) ([]byte, error) { + return []byte(strings.ToUpper(string(c))), nil +} +func (c *CustomString) UnmarshalCQL(info TypeInfo, data []byte) error { + *c = CustomString(strings.ToLower(string(data))) + return nil +} + +type MyString string + +type MyInt int + +var typeLookupTest = []struct { + TypeName string + ExpectedType Type +}{ + {"AsciiType", TypeAscii}, + {"LongType", TypeBigInt}, + {"BytesType", TypeBlob}, + {"BooleanType", TypeBoolean}, + {"CounterColumnType", TypeCounter}, + {"DecimalType", TypeDecimal}, + {"DoubleType", TypeDouble}, + {"FloatType", TypeFloat}, + {"Int32Type", TypeInt}, + {"DateType", TypeTimestamp}, + {"TimestampType", TypeTimestamp}, + {"UUIDType", TypeUUID}, + {"UTF8Type", TypeVarchar}, + {"IntegerType", TypeVarint}, + {"TimeUUIDType", TypeTimeUUID}, + {"InetAddressType", TypeInet}, + {"MapType", TypeMap}, + {"ListType", TypeList}, + {"SetType", TypeSet}, + {"unknown", TypeCustom}, + {"ShortType", TypeSmallInt}, + {"ByteType", TypeTinyInt}, +} + +func testType(t *testing.T, cassType string, expectedType Type) { + if computedType := getApacheCassandraType(apacheCassandraTypePrefix + cassType);
computedType != expectedType { + t.Errorf("Cassandra custom type lookup for %s failed. Expected %s, got %s.", cassType, expectedType.String(), computedType.String()) + } +} + +func TestLookupCassType(t *testing.T) { + for _, lookupTest := range typeLookupTest { + testType(t, lookupTest.TypeName, lookupTest.ExpectedType) + } +} + +type MyPointerMarshaler struct{} + +func (m *MyPointerMarshaler) MarshalCQL(_ TypeInfo) ([]byte, error) { + return []byte{42}, nil +} + +func TestMarshalPointer(t *testing.T) { + m := &MyPointerMarshaler{} + typ := NativeType{proto: 2, typ: TypeInt} + + data, err := Marshal(typ, m) + + if err != nil { + t.Errorf("Pointer marshaling failed. Error: %s", err) + } + if len(data) != 1 || data[0] != 42 { + t.Errorf("Pointer marshaling failed. Expected %+v, got %+v", []byte{42}, data) + } +} + +func TestMarshalTimestamp(t *testing.T) { + var marshalTimestampTests = []struct { + Info TypeInfo + Data []byte + Value interface{} + }{ + { + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"), + time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC), + }, + { + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x01\x40\x77\x16\xe1\xb8"), + int64(1376387523000), + }, + { + // 9223372036854 is the maximum time representable in ms since the epoch + // with int64 if using UnixNano to convert + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x08\x63\x7b\xd0\x5a\xf6"), + time.Date(2262, time.April, 11, 23, 47, 16, 854775807, time.UTC), + }, + { + // One nanosecond after causes overflow when using UnixNano + // Instead it should resolve to the same time in ms + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\x00\x00\x08\x63\x7b\xd0\x5a\xf6"), + time.Date(2262, time.April, 11, 23, 47, 16, 854775808, time.UTC), + }, + { + // -9223372036855 is the minimum time representable in ms since the epoch + // with int64 if using UnixNano to convert + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\xff\xff\xf7\x9c\x84\x2f\xa5\x09"), + time.Date(1677, time.September, 21, 00, 12, 43, 145224192, time.UTC), + }, + { + // One nanosecond earlier causes overflow when using UnixNano + // it should resolve to the same time in ms + NativeType{proto: 2, typ: TypeTimestamp}, + []byte("\xff\xff\xf7\x9c\x84\x2f\xa5\x09"), + time.Date(1677, time.September, 21, 00, 12, 43, 145224191, time.UTC), + }, + { + // Store the zero time as a blank slice + NativeType{proto: 2, typ: TypeTimestamp}, + []byte{}, + time.Time{}, + }, + } + + for i, test := range marshalTimestampTests { + t.Log(i, test) + data, err := Marshal(test.Info, test.Value) + if err != nil { + t.Errorf("marshalTest[%d]: %v", i, err) + continue + } + if !bytes.Equal(data, test.Data) { + t.Errorf("marshalTest[%d]: expected %x (%v), got %x (%v) for time %s", i, + test.Data, decBigInt(test.Data), data, decBigInt(data), test.Value) + } + } +} + +func TestMarshalTuple(t *testing.T) { + info := TupleTypeInfo{ + NativeType: NativeType{proto: 3, typ: TypeTuple}, + Elems: []TypeInfo{ + NativeType{proto: 3, typ: TypeVarchar}, + NativeType{proto: 3, typ: TypeVarchar}, + }, + } + + expectedData := []byte("\x00\x00\x00\x03foo\x00\x00\x00\x03bar") + value := []interface{}{"foo", "bar"} + + data, err := Marshal(info, value) + if err != nil { + t.Errorf("marshalTest: %v", err) + return + } + + if !bytes.Equal(data, expectedData) { + t.Errorf("marshalTest: expected %x (%v), got %x (%v)", + expectedData, decBigInt(expectedData), data, decBigInt(data)) + return + } + + var s1, s2 string + val := 
[]interface{}{&s1, &s2} + err = Unmarshal(info, expectedData, val) + if err != nil { + t.Errorf("unmarshalTest: %v", err) + return + } + + if s1 != "foo" || s2 != "bar" { + t.Errorf("unmarshalTest: expected [foo, bar], got [%s, %s]", s1, s2) + } +} + +func TestMarshalNil(t *testing.T) { + types := []Type{ + TypeAscii, + TypeBlob, + TypeBoolean, + TypeBigInt, + TypeCounter, + TypeDecimal, + TypeDouble, + TypeFloat, + TypeInt, + TypeTimestamp, + TypeUUID, + TypeVarchar, + TypeVarint, + TypeTimeUUID, + TypeInet, + } + + for _, typ := range types { + data, err := Marshal(NativeType{proto: 3, typ: typ}, nil) + if err != nil { + t.Errorf("unable to marshal nil %v: %v\n", typ, err) + } else if data != nil { + t.Errorf("expected to get nil byte for nil %v got % X", typ, data) + } + } +} + +func TestUnmarshalInetCopyBytes(t *testing.T) { + data := []byte{127, 0, 0, 1} + var ip net.IP + if err := unmarshalInet(NativeType{proto: 2, typ: TypeInet}, data, &ip); err != nil { + t.Fatal(err) + } + + copy(data, []byte{0xFF, 0xFF, 0xFF, 0xFF}) + ip2 := net.IP(data) + if !ip.Equal(net.IPv4(127, 0, 0, 1)) { + t.Fatalf("IP memory shared with data: ip=%v ip2=%v", ip, ip2) + } +} + +func TestUnmarshalDate(t *testing.T) { + data := []uint8{0x80, 0x0, 0x43, 0x31} + var date time.Time + if err := unmarshalDate(NativeType{proto: 2, typ: TypeDate}, data, &date); err != nil { + t.Fatal(err) + } + + expectedDate := "2017-02-04" + formattedDate := date.Format("2006-01-02") + if expectedDate != formattedDate { + t.Errorf("marshalTest: expected %v, got %v", expectedDate, formattedDate) + return + } +} + +func TestMarshalDate(t *testing.T) { + now := time.Now() + timestamp := now.UnixNano() / int64(time.Millisecond) + expectedData := encInt(int32(timestamp/86400000 + int64(1<<31))) + var marshalDateTests = []struct { + Info TypeInfo + Data []byte + Value interface{} + }{ + { + NativeType{proto: 4, typ: TypeDate}, + expectedData, + timestamp, + }, + { + NativeType{proto: 4, typ: TypeDate}, + expectedData, + now, + }, + { + NativeType{proto: 4, typ: TypeDate}, + expectedData, + &now, + }, + { + NativeType{proto: 4, typ: TypeDate}, + expectedData, + now.Format("2006-01-02"), + }, + } + + for i, test := range marshalDateTests { + t.Log(i, test) + data, err := Marshal(test.Info, test.Value) + if err != nil { + t.Errorf("marshalTest[%d]: %v", i, err) + continue + } + if !bytes.Equal(data, test.Data) { + t.Errorf("marshalTest[%d]: expected %x (%v), got %x (%v) for time %s", i, + test.Data, decInt(test.Data), data, decInt(data), test.Value) + } + } +} diff --git a/vendor/github.com/gocql/gocql/metadata_test.go b/vendor/github.com/gocql/gocql/metadata_test.go new file mode 100644 index 0000000000..cc4631acc1 --- /dev/null +++ b/vendor/github.com/gocql/gocql/metadata_test.go @@ -0,0 +1,815 @@ +// Copyright (c) 2015 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gocql + +import ( + "strconv" + "testing" +) + +// Tests V1 and V2 metadata "compilation" from example data which might be returned +// from metadata schema queries (see getKeyspaceMetadata, getTableMetadata, and getColumnMetadata) +func TestCompileMetadata(t *testing.T) { + // V1 tests - these are all based on real examples from the integration test ccm cluster + keyspace := &KeyspaceMetadata{ + Name: "V1Keyspace", + } + tables := []TableMetadata{ + { + // This table, found in the system keyspace, has no key aliases or column aliases + Keyspace: "V1Keyspace", + Name: "Schema", + KeyValidator: "org.apache.cassandra.db.marshal.BytesType", + Comparator: "org.apache.cassandra.db.marshal.UTF8Type", + DefaultValidator: "org.apache.cassandra.db.marshal.BytesType", + KeyAliases: []string{}, + ColumnAliases: []string{}, + ValueAlias: "", + }, + { + // This table, found in the system keyspace, has key aliases, column aliases, and a value alias. + Keyspace: "V1Keyspace", + Name: "hints", + KeyValidator: "org.apache.cassandra.db.marshal.UUIDType", + Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.TimeUUIDType,org.apache.cassandra.db.marshal.Int32Type)", + DefaultValidator: "org.apache.cassandra.db.marshal.BytesType", + KeyAliases: []string{"target_id"}, + ColumnAliases: []string{"hint_id", "message_version"}, + ValueAlias: "mutation", + }, + { + // This table, found in the system keyspace, has a comparator with collections, but no column aliases + Keyspace: "V1Keyspace", + Name: "peers", + KeyValidator: "org.apache.cassandra.db.marshal.InetAddressType", + Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(746f6b656e73:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)))", + DefaultValidator: "org.apache.cassandra.db.marshal.BytesType", + KeyAliases: []string{"peer"}, + ColumnAliases: []string{}, + ValueAlias: "", + }, + { + // This table, found in the system keyspace, has a column alias, but not a composite comparator + Keyspace: "V1Keyspace", + Name: "IndexInfo", + KeyValidator: "org.apache.cassandra.db.marshal.UTF8Type", + Comparator: "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)", + DefaultValidator: "org.apache.cassandra.db.marshal.BytesType", + KeyAliases: []string{"table_name"}, + ColumnAliases: []string{"index_name"}, + ValueAlias: "", + }, + { + // This table, found in the gocql_test keyspace following an integration test run, has a composite comparator with collections as well as a column alias + Keyspace: "V1Keyspace", + Name: "wiki_page", + KeyValidator: "org.apache.cassandra.db.marshal.UTF8Type", + Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.TimeUUIDType,org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(74616773:org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type),6174746163686d656e7473:org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.BytesType)))", + DefaultValidator: "org.apache.cassandra.db.marshal.BytesType", + KeyAliases: []string{"title"}, + ColumnAliases: []string{"revid"}, + ValueAlias: "", + }, + { + // This is a made up example with multiple unnamed aliases + Keyspace: "V1Keyspace", + Name: "no_names", + KeyValidator: 
"org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UUIDType,org.apache.cassandra.db.marshal.UUIDType)", + Comparator: "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.Int32Type)", + DefaultValidator: "org.apache.cassandra.db.marshal.BytesType", + KeyAliases: []string{}, + ColumnAliases: []string{}, + ValueAlias: "", + }, + } + columns := []ColumnMetadata{ + // Here are the regular columns from the peers table for testing regular columns + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "data_center", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"}, + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "host_id", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType"}, + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "rack", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"}, + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "release_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type"}, + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "rpc_address", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.InetAddressType"}, + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "schema_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType"}, + {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "tokens", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)"}, + } + compileMetadata(1, keyspace, tables, columns) + assertKeyspaceMetadata( + t, + keyspace, + &KeyspaceMetadata{ + Name: "V1Keyspace", + Tables: map[string]*TableMetadata{ + "Schema": { + PartitionKey: []*ColumnMetadata{ + { + Name: "key", + Type: NativeType{typ: TypeBlob}, + }, + }, + ClusteringColumns: []*ColumnMetadata{}, + Columns: map[string]*ColumnMetadata{ + "key": { + Name: "key", + Type: NativeType{typ: TypeBlob}, + Kind: ColumnPartitionKey, + }, + }, + }, + "hints": { + PartitionKey: []*ColumnMetadata{ + { + Name: "target_id", + Type: NativeType{typ: TypeUUID}, + }, + }, + ClusteringColumns: []*ColumnMetadata{ + { + Name: "hint_id", + Type: NativeType{typ: TypeTimeUUID}, + Order: ASC, + }, + { + Name: "message_version", + Type: NativeType{typ: TypeInt}, + Order: ASC, + }, + }, + Columns: map[string]*ColumnMetadata{ + "target_id": { + Name: "target_id", + Type: NativeType{typ: TypeUUID}, + Kind: ColumnPartitionKey, + }, + "hint_id": { + Name: "hint_id", + Type: NativeType{typ: TypeTimeUUID}, + Order: ASC, + Kind: ColumnClusteringKey, + }, + "message_version": { + Name: "message_version", + Type: NativeType{typ: TypeInt}, + Order: ASC, + Kind: ColumnClusteringKey, + }, + "mutation": { + Name: "mutation", + Type: NativeType{typ: TypeBlob}, + Kind: ColumnRegular, + }, + }, + }, + "peers": { + PartitionKey: []*ColumnMetadata{ + { + Name: "peer", + Type: NativeType{typ: TypeInet}, + }, + }, + ClusteringColumns: []*ColumnMetadata{}, + Columns: map[string]*ColumnMetadata{ + "peer": { + Name: "peer", + Type: NativeType{typ: TypeInet}, + Kind: ColumnPartitionKey, + }, + "data_center": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "data_center", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}}, + 
"host_id": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "host_id", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType", Type: NativeType{typ: TypeUUID}}, + "rack": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "rack", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}}, + "release_version": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "release_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UTF8Type", Type: NativeType{typ: TypeVarchar}}, + "rpc_address": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "rpc_address", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.InetAddressType", Type: NativeType{typ: TypeInet}}, + "schema_version": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "schema_version", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.UUIDType", Type: NativeType{typ: TypeUUID}}, + "tokens": {Keyspace: "V1Keyspace", Table: "peers", Kind: ColumnRegular, Name: "tokens", ComponentIndex: 0, Validator: "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.UTF8Type)", Type: CollectionType{NativeType: NativeType{typ: TypeSet}}}, + }, + }, + "IndexInfo": { + PartitionKey: []*ColumnMetadata{ + { + Name: "table_name", + Type: NativeType{typ: TypeVarchar}, + }, + }, + ClusteringColumns: []*ColumnMetadata{ + { + Name: "index_name", + Type: NativeType{typ: TypeVarchar}, + Order: DESC, + }, + }, + Columns: map[string]*ColumnMetadata{ + "table_name": { + Name: "table_name", + Type: NativeType{typ: TypeVarchar}, + Kind: ColumnPartitionKey, + }, + "index_name": { + Name: "index_name", + Type: NativeType{typ: TypeVarchar}, + Order: DESC, + Kind: ColumnClusteringKey, + }, + "value": { + Name: "value", + Type: NativeType{typ: TypeBlob}, + Kind: ColumnRegular, + }, + }, + }, + "wiki_page": { + PartitionKey: []*ColumnMetadata{ + { + Name: "title", + Type: NativeType{typ: TypeVarchar}, + }, + }, + ClusteringColumns: []*ColumnMetadata{ + { + Name: "revid", + Type: NativeType{typ: TypeTimeUUID}, + Order: ASC, + }, + }, + Columns: map[string]*ColumnMetadata{ + "title": { + Name: "title", + Type: NativeType{typ: TypeVarchar}, + Kind: ColumnPartitionKey, + }, + "revid": { + Name: "revid", + Type: NativeType{typ: TypeTimeUUID}, + Kind: ColumnClusteringKey, + }, + }, + }, + "no_names": { + PartitionKey: []*ColumnMetadata{ + { + Name: "key", + Type: NativeType{typ: TypeUUID}, + }, + { + Name: "key2", + Type: NativeType{typ: TypeUUID}, + }, + }, + ClusteringColumns: []*ColumnMetadata{ + { + Name: "column", + Type: NativeType{typ: TypeInt}, + Order: ASC, + }, + { + Name: "column2", + Type: NativeType{typ: TypeInt}, + Order: ASC, + }, + { + Name: "column3", + Type: NativeType{typ: TypeInt}, + Order: ASC, + }, + }, + Columns: map[string]*ColumnMetadata{ + "key": { + Name: "key", + Type: NativeType{typ: TypeUUID}, + Kind: ColumnPartitionKey, + }, + "key2": { + Name: "key2", + Type: NativeType{typ: TypeUUID}, + Kind: ColumnPartitionKey, + }, + "column": { + Name: "column", + Type: NativeType{typ: TypeInt}, + Order: ASC, + Kind: ColumnClusteringKey, + }, + "column2": { + Name: "column2", + Type: NativeType{typ: TypeInt}, + Order: ASC, + Kind: ColumnClusteringKey, + }, + "column3": { + Name: "column3", + Type: NativeType{typ: TypeInt}, + Order: ASC, + Kind: ColumnClusteringKey, + }, + "value": { + Name: "value", + Type: NativeType{typ: TypeBlob}, + Kind: 
ColumnRegular, + }, + }, + }, + }, + }, + ) + + // V2 test - V2+ protocol is simpler so here are some toy examples to verify that the mapping works + keyspace = &KeyspaceMetadata{ + Name: "V2Keyspace", + } + tables = []TableMetadata{ + { + Keyspace: "V2Keyspace", + Name: "Table1", + }, + { + Keyspace: "V2Keyspace", + Name: "Table2", + }, + } + columns = []ColumnMetadata{ + { + Keyspace: "V2Keyspace", + Table: "Table1", + Name: "KEY1", + Kind: ColumnPartitionKey, + ComponentIndex: 0, + Validator: "org.apache.cassandra.db.marshal.UTF8Type", + }, + { + Keyspace: "V2Keyspace", + Table: "Table1", + Name: "Key1", + Kind: ColumnPartitionKey, + ComponentIndex: 0, + Validator: "org.apache.cassandra.db.marshal.UTF8Type", + }, + { + Keyspace: "V2Keyspace", + Table: "Table2", + Name: "Column1", + Kind: ColumnPartitionKey, + ComponentIndex: 0, + Validator: "org.apache.cassandra.db.marshal.UTF8Type", + }, + { + Keyspace: "V2Keyspace", + Table: "Table2", + Name: "Column2", + Kind: ColumnClusteringKey, + ComponentIndex: 0, + Validator: "org.apache.cassandra.db.marshal.UTF8Type", + }, + { + Keyspace: "V2Keyspace", + Table: "Table2", + Name: "Column3", + Kind: ColumnClusteringKey, + ComponentIndex: 1, + Validator: "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UTF8Type)", + }, + { + Keyspace: "V2Keyspace", + Table: "Table2", + Name: "Column4", + Kind: ColumnRegular, + Validator: "org.apache.cassandra.db.marshal.UTF8Type", + }, + } + compileMetadata(2, keyspace, tables, columns) + assertKeyspaceMetadata( + t, + keyspace, + &KeyspaceMetadata{ + Name: "V2Keyspace", + Tables: map[string]*TableMetadata{ + "Table1": { + PartitionKey: []*ColumnMetadata{ + { + Name: "Key1", + Type: NativeType{typ: TypeVarchar}, + }, + }, + ClusteringColumns: []*ColumnMetadata{}, + Columns: map[string]*ColumnMetadata{ + "KEY1": { + Name: "KEY1", + Type: NativeType{typ: TypeVarchar}, + Kind: ColumnPartitionKey, + }, + "Key1": { + Name: "Key1", + Type: NativeType{typ: TypeVarchar}, + Kind: ColumnPartitionKey, + }, + }, + }, + "Table2": { + PartitionKey: []*ColumnMetadata{ + { + Name: "Column1", + Type: NativeType{typ: TypeVarchar}, + }, + }, + ClusteringColumns: []*ColumnMetadata{ + { + Name: "Column2", + Type: NativeType{typ: TypeVarchar}, + Order: ASC, + }, + { + Name: "Column3", + Type: NativeType{typ: TypeVarchar}, + Order: DESC, + }, + }, + Columns: map[string]*ColumnMetadata{ + "Column1": { + Name: "Column1", + Type: NativeType{typ: TypeVarchar}, + Kind: ColumnPartitionKey, + }, + "Column2": { + Name: "Column2", + Type: NativeType{typ: TypeVarchar}, + Order: ASC, + Kind: ColumnClusteringKey, + }, + "Column3": { + Name: "Column3", + Type: NativeType{typ: TypeVarchar}, + Order: DESC, + Kind: ColumnClusteringKey, + }, + "Column4": { + Name: "Column4", + Type: NativeType{typ: TypeVarchar}, + Kind: ColumnRegular, + }, + }, + }, + }, + }, + ) +} + +// Helper function for asserting that actual metadata returned was as expected +func assertKeyspaceMetadata(t *testing.T, actual, expected *KeyspaceMetadata) { + if len(expected.Tables) != len(actual.Tables) { + t.Errorf("Expected len(%s.Tables) to be %v but was %v", expected.Name, len(expected.Tables), len(actual.Tables)) + } + for keyT := range expected.Tables { + et := expected.Tables[keyT] + at, found := actual.Tables[keyT] + + if !found { + t.Errorf("Expected %s.Tables[%s] but was not found", expected.Name, keyT) + } else { + if keyT != at.Name { + t.Errorf("Expected %s.Tables[%s].Name to be %v but was %v", expected.Name, keyT, keyT, at.Name) + } + if 
len(et.PartitionKey) != len(at.PartitionKey) { + t.Errorf("Expected len(%s.Tables[%s].PartitionKey) to be %v but was %v", expected.Name, keyT, len(et.PartitionKey), len(at.PartitionKey)) + } else { + for i := range et.PartitionKey { + if et.PartitionKey[i].Name != at.PartitionKey[i].Name { + t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Name to be '%v' but was '%v'", expected.Name, keyT, i, et.PartitionKey[i].Name, at.PartitionKey[i].Name) + } + if expected.Name != at.PartitionKey[i].Keyspace { + t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Keyspace to be '%v' but was '%v'", expected.Name, keyT, i, expected.Name, at.PartitionKey[i].Keyspace) + } + if keyT != at.PartitionKey[i].Table { + t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Table to be '%v' but was '%v'", expected.Name, keyT, i, keyT, at.PartitionKey[i].Table) + } + if et.PartitionKey[i].Type.Type() != at.PartitionKey[i].Type.Type() { + t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Type.Type to be %v but was %v", expected.Name, keyT, i, et.PartitionKey[i].Type.Type(), at.PartitionKey[i].Type.Type()) + } + if i != at.PartitionKey[i].ComponentIndex { + t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].ComponentIndex to be %v but was %v", expected.Name, keyT, i, i, at.PartitionKey[i].ComponentIndex) + } + if ColumnPartitionKey != at.PartitionKey[i].Kind { + t.Errorf("Expected %s.Tables[%s].PartitionKey[%d].Kind to be '%v' but was '%v'", expected.Name, keyT, i, ColumnPartitionKey, at.PartitionKey[i].Kind) + } + } + } + if len(et.ClusteringColumns) != len(at.ClusteringColumns) { + t.Errorf("Expected len(%s.Tables[%s].ClusteringColumns) to be %v but was %v", expected.Name, keyT, len(et.ClusteringColumns), len(at.ClusteringColumns)) + } else { + for i := range et.ClusteringColumns { + if at.ClusteringColumns[i] == nil { + t.Fatalf("Unexpected nil value: %s.Tables[%s].ClusteringColumns[%d]", expected.Name, keyT, i) + } + if et.ClusteringColumns[i].Name != at.ClusteringColumns[i].Name { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Name to be '%v' but was '%v'", expected.Name, keyT, i, et.ClusteringColumns[i].Name, at.ClusteringColumns[i].Name) + } + if expected.Name != at.ClusteringColumns[i].Keyspace { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Keyspace to be '%v' but was '%v'", expected.Name, keyT, i, expected.Name, at.ClusteringColumns[i].Keyspace) + } + if keyT != at.ClusteringColumns[i].Table { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Table to be '%v' but was '%v'", expected.Name, keyT, i, keyT, at.ClusteringColumns[i].Table) + } + if et.ClusteringColumns[i].Type.Type() != at.ClusteringColumns[i].Type.Type() { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Type.Type to be %v but was %v", expected.Name, keyT, i, et.ClusteringColumns[i].Type.Type(), at.ClusteringColumns[i].Type.Type()) + } + if i != at.ClusteringColumns[i].ComponentIndex { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].ComponentIndex to be %v but was %v", expected.Name, keyT, i, i, at.ClusteringColumns[i].ComponentIndex) + } + if et.ClusteringColumns[i].Order != at.ClusteringColumns[i].Order { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Order to be %v but was %v", expected.Name, keyT, i, et.ClusteringColumns[i].Order, at.ClusteringColumns[i].Order) + } + if ColumnClusteringKey != at.ClusteringColumns[i].Kind { + t.Errorf("Expected %s.Tables[%s].ClusteringColumns[%d].Kind to be '%v' but was '%v'", expected.Name, keyT, i, ColumnClusteringKey, at.ClusteringColumns[i].Kind) + } + } + 
} + if len(et.Columns) != len(at.Columns) { + eKeys := make([]string, 0, len(et.Columns)) + for key := range et.Columns { + eKeys = append(eKeys, key) + } + aKeys := make([]string, 0, len(at.Columns)) + for key := range at.Columns { + aKeys = append(aKeys, key) + } + t.Errorf("Expected len(%s.Tables[%s].Columns) to be %v (keys:%v) but was %v (keys:%v)", expected.Name, keyT, len(et.Columns), eKeys, len(at.Columns), aKeys) + } else { + for keyC := range et.Columns { + ec := et.Columns[keyC] + ac, found := at.Columns[keyC] + + if !found { + t.Errorf("Expected %s.Tables[%s].Columns[%s] but was not found", expected.Name, keyT, keyC) + } else { + if keyC != ac.Name { + t.Errorf("Expected %s.Tables[%s].Columns[%s].Name to be '%v' but was '%v'", expected.Name, keyT, keyC, keyC, ac.Name) + } + if expected.Name != ac.Keyspace { + t.Errorf("Expected %s.Tables[%s].Columns[%s].Keyspace to be '%v' but was '%v'", expected.Name, keyT, keyC, expected.Name, ac.Keyspace) + } + if keyT != ac.Table { + t.Errorf("Expected %s.Tables[%s].Columns[%s].Table to be '%v' but was '%v'", expected.Name, keyT, keyC, keyT, ac.Table) + } + if ec.Type.Type() != ac.Type.Type() { + t.Errorf("Expected %s.Tables[%s].Columns[%s].Type.Type to be %v but was %v", expected.Name, keyT, keyC, ec.Type.Type(), ac.Type.Type()) + } + if ec.Order != ac.Order { + t.Errorf("Expected %s.Tables[%s].Columns[%s].Order to be %v but was %v", expected.Name, keyT, keyC, ec.Order, ac.Order) + } + if ec.Kind != ac.Kind { + t.Errorf("Expected %s.Tables[%s].Columns[%s].Kind to be '%v' but was '%v'", expected.Name, keyT, keyC, ec.Kind, ac.Kind) + } + } + } + } + } + } +} + +// Tests the Cassandra type definition parser +func TestTypeParser(t *testing.T) { + // native type + assertParseNonCompositeType( + t, + "org.apache.cassandra.db.marshal.UTF8Type", + assertTypeInfo{Type: TypeVarchar}, + ) + + // reversed + assertParseNonCompositeType( + t, + "org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.UUIDType)", + assertTypeInfo{Type: TypeUUID, Reversed: true}, + ) + + // set + assertParseNonCompositeType( + t, + "org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.Int32Type)", + assertTypeInfo{ + Type: TypeSet, + Elem: &assertTypeInfo{Type: TypeInt}, + }, + ) + + // list + assertParseNonCompositeType( + t, + "org.apache.cassandra.db.marshal.ListType(org.apache.cassandra.db.marshal.TimeUUIDType)", + assertTypeInfo{ + Type: TypeList, + Elem: &assertTypeInfo{Type: TypeTimeUUID}, + }, + ) + + // map + assertParseNonCompositeType( + t, + " org.apache.cassandra.db.marshal.MapType( org.apache.cassandra.db.marshal.UUIDType , org.apache.cassandra.db.marshal.BytesType ) ", + assertTypeInfo{ + Type: TypeMap, + Key: &assertTypeInfo{Type: TypeUUID}, + Elem: &assertTypeInfo{Type: TypeBlob}, + }, + ) + + // custom + assertParseNonCompositeType( + t, + "org.apache.cassandra.db.marshal.UserType(sandbox,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,63697479:org.apache.cassandra.db.marshal.UTF8Type,7a6970:org.apache.cassandra.db.marshal.Int32Type)", + assertTypeInfo{Type: TypeCustom, Custom: "org.apache.cassandra.db.marshal.UserType(sandbox,61646472657373,737472656574:org.apache.cassandra.db.marshal.UTF8Type,63697479:org.apache.cassandra.db.marshal.UTF8Type,7a6970:org.apache.cassandra.db.marshal.Int32Type)"}, + ) + assertParseNonCompositeType( + t, +
"org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,d=>org.apache.cassandra.db.marshal.DateType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,b=>org.apache.cassandra.db.marshal.BytesType,s=>org.apache.cassandra.db.marshal.UTF8Type,B=>org.apache.cassandra.db.marshal.BooleanType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,i=>org.apache.cassandra.db.marshal.IntegerType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType)", + assertTypeInfo{Type: TypeCustom, Custom: "org.apache.cassandra.db.marshal.DynamicCompositeType(u=>org.apache.cassandra.db.marshal.UUIDType,d=>org.apache.cassandra.db.marshal.DateType,t=>org.apache.cassandra.db.marshal.TimeUUIDType,b=>org.apache.cassandra.db.marshal.BytesType,s=>org.apache.cassandra.db.marshal.UTF8Type,B=>org.apache.cassandra.db.marshal.BooleanType,a=>org.apache.cassandra.db.marshal.AsciiType,l=>org.apache.cassandra.db.marshal.LongType,i=>org.apache.cassandra.db.marshal.IntegerType,x=>org.apache.cassandra.db.marshal.LexicalUUIDType)"}, + ) + + // composite defs + assertParseCompositeType( + t, + "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type)", + []assertTypeInfo{ + {Type: TypeVarchar}, + }, + nil, + ) + assertParseCompositeType( + t, + "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.ReversedType(org.apache.cassandra.db.marshal.DateType),org.apache.cassandra.db.marshal.UTF8Type)", + []assertTypeInfo{ + {Type: TypeTimestamp, Reversed: true}, + {Type: TypeVarchar}, + }, + nil, + ) + assertParseCompositeType( + t, + "org.apache.cassandra.db.marshal.CompositeType(org.apache.cassandra.db.marshal.UTF8Type,org.apache.cassandra.db.marshal.ColumnToCollectionType(726f77735f6d6572676564:org.apache.cassandra.db.marshal.MapType(org.apache.cassandra.db.marshal.Int32Type,org.apache.cassandra.db.marshal.LongType)))", + []assertTypeInfo{ + {Type: TypeVarchar}, + }, + map[string]assertTypeInfo{ + "rows_merged": { + Type: TypeMap, + Key: &assertTypeInfo{Type: TypeInt}, + Elem: &assertTypeInfo{Type: TypeBigInt}, + }, + }, + ) +} + +// expected data holder +type assertTypeInfo struct { + Type Type + Reversed bool + Elem *assertTypeInfo + Key *assertTypeInfo + Custom string +} + +// Helper function for asserting that the type parser returns the expected +// results for the given definition +func assertParseNonCompositeType( + t *testing.T, + def string, + typeExpected assertTypeInfo, +) { + + result := parseType(def) + if len(result.reversed) != 1 { + t.Errorf("%s expected %d reversed values but there were %d", def, 1, len(result.reversed)) + } + + assertParseNonCompositeTypes( + t, + def, + []assertTypeInfo{typeExpected}, + result.types, + ) + + // expect no composite part of the result + if result.isComposite { + t.Errorf("%s: Expected not composite", def) + } + if result.collections != nil { + t.Errorf("%s: Expected nil collections: %v", def, result.collections) + } +} + +// Helper function for asserting that the type parser returns the expected +// results for the given definition +func assertParseCompositeType( + t *testing.T, + def string, + typesExpected []assertTypeInfo, + collectionsExpected map[string]assertTypeInfo, +) { + + result := parseType(def) + if len(result.reversed) != len(typesExpected) { + t.Errorf("%s expected %d reversed values but there were %d", def, len(typesExpected), len(result.reversed)) + } + + assertParseNonCompositeTypes( + t, + def, + typesExpected, + result.types, + ) + + // expect 
composite part of the result + if !result.isComposite { + t.Errorf("%s: Expected composite", def) + } + if result.collections == nil { + t.Errorf("%s: Expected non-nil collections: %v", def, result.collections) + } + + for name, typeExpected := range collectionsExpected { + // check for an actual type for this name + typeActual, found := result.collections[name] + if !found { + t.Errorf("%s.collections: Expected param named %s but there wasn't", def, name) + } else { + // remove the actual from the collection so we can detect extras + delete(result.collections, name) + + // check the type + assertParseNonCompositeTypes( + t, + def+".collections["+name+"]", + []assertTypeInfo{typeExpected}, + []TypeInfo{typeActual}, + ) + } + } + + if len(result.collections) != 0 { + t.Errorf("%s.collections: Expected no more types in collections, but there was %v", def, result.collections) + } +} + +// Helper function for asserting that the type parser returns the expected +// results for the given definition +func assertParseNonCompositeTypes( + t *testing.T, + context string, + typesExpected []assertTypeInfo, + typesActual []TypeInfo, +) { + if len(typesActual) != len(typesExpected) { + t.Errorf("%s: Expected %d types, but there were %d", context, len(typesExpected), len(typesActual)) + } + + for i := range typesExpected { + typeExpected := typesExpected[i] + typeActual := typesActual[i] + + // shadow copy the context for local modification + context := context + if len(typesExpected) > 1 { + context = context + "[" + strconv.Itoa(i) + "]" + } + + // check the type + if typeActual.Type() != typeExpected.Type { + t.Errorf("%s: Expected to parse Type to %s but was %s", context, typeExpected.Type, typeActual.Type()) + } + // check the custom + if typeActual.Custom() != typeExpected.Custom { + t.Errorf("%s: Expected to parse Custom %s but was %s", context, typeExpected.Custom, typeActual.Custom()) + } + + collection, _ := typeActual.(CollectionType) + // check the elem + if typeExpected.Elem != nil { + if collection.Elem == nil { + t.Errorf("%s: Expected to parse Elem, but was nil", context) + } else { + assertParseNonCompositeTypes( + t, + context+".Elem", + []assertTypeInfo{*typeExpected.Elem}, + []TypeInfo{collection.Elem}, + ) + } + } else if collection.Elem != nil { + t.Errorf("%s: Expected to not parse Elem, but was %+v", context, collection.Elem) + } + + // check the key + if typeExpected.Key != nil { + if collection.Key == nil { + t.Errorf("%s: Expected to parse Key, but was nil", context) + } else { + assertParseNonCompositeTypes( + t, + context+".Key", + []assertTypeInfo{*typeExpected.Key}, + []TypeInfo{collection.Key}, + ) + } + } else if collection.Key != nil { + t.Errorf("%s: Expected to not parse Key, but was %+v", context, collection.Key) + } + } +} diff --git a/vendor/github.com/gocql/gocql/policies_test.go b/vendor/github.com/gocql/gocql/policies_test.go new file mode 100644 index 0000000000..ae7a45c398 --- /dev/null +++ b/vendor/github.com/gocql/gocql/policies_test.go @@ -0,0 +1,320 @@ +// Copyright (c) 2015 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package gocql + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/hailocab/go-hostpool" +) + +// Tests of the round-robin host selection policy implementation +func TestRoundRobinHostPolicy(t *testing.T) { + policy := RoundRobinHostPolicy() + + hosts := [...]*HostInfo{ + {hostId: "0", connectAddress: net.IPv4(0, 0, 0, 1)}, + {hostId: "1", connectAddress: net.IPv4(0, 0, 0, 2)}, + } + + for _, host := range hosts { + policy.AddHost(host) + } + + // interleaved iteration should always increment the host + iterA := policy.Pick(nil) + if actual := iterA(); actual.Info() != hosts[0] { + t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID()) + } + iterB := policy.Pick(nil) + if actual := iterB(); actual.Info() != hosts[1] { + t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID()) + } + if actual := iterB(); actual.Info() != hosts[0] { + t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID()) + } + if actual := iterA(); actual.Info() != hosts[1] { + t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID()) + } + + iterC := policy.Pick(nil) + if actual := iterC(); actual.Info() != hosts[0] { + t.Errorf("Expected hosts[0] but was hosts[%s]", actual.Info().HostID()) + } + if actual := iterC(); actual.Info() != hosts[1] { + t.Errorf("Expected hosts[1] but was hosts[%s]", actual.Info().HostID()) + } +} + +// Tests of the token-aware host selection policy implementation with a +// round-robin host selection policy fallback. +func TestTokenAwareHostPolicy(t *testing.T) { + policy := TokenAwareHostPolicy(RoundRobinHostPolicy()) + + query := &Query{} + + iter := policy.Pick(nil) + if iter == nil { + t.Fatal("host iterator was nil") + } + actual := iter() + if actual != nil { + t.Fatalf("expected nil from iterator, but was %v", actual) + } + + // set the hosts + hosts := [...]*HostInfo{ + {connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{"00"}}, + {connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{"25"}}, + {connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{"50"}}, + {connectAddress: net.IPv4(10, 0, 0, 4), tokens: []string{"75"}}, + } + for _, host := range hosts { + policy.AddHost(host) + } + + // the token ring is not set up without the partitioner, but the fallback + // should work + if actual := policy.Pick(nil)(); !actual.Info().ConnectAddress().Equal(hosts[0].ConnectAddress()) { + t.Errorf("Expected peer 0 but was %s", actual.Info().ConnectAddress()) + } + + query.RoutingKey([]byte("30")) + if actual := policy.Pick(query)(); !actual.Info().ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Errorf("Expected peer 1 but was %s", actual.Info().ConnectAddress()) + } + + policy.SetPartitioner("OrderedPartitioner") + + // now the token ring is configured + query.RoutingKey([]byte("20")) + iter = policy.Pick(query) + if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Errorf("Expected peer 1 but was %s", actual.Info().ConnectAddress()) + } + // rest are round robin + if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[2].ConnectAddress()) { + t.Errorf("Expected peer 2 but was %s", actual.Info().ConnectAddress()) + } + if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[3].ConnectAddress()) { + t.Errorf("Expected peer 3 but was %s", actual.Info().ConnectAddress()) + } + if actual := iter(); !actual.Info().ConnectAddress().Equal(hosts[0].ConnectAddress()) { + t.Errorf("Expected peer 0 but was %s", actual.Info().ConnectAddress()) + } +} + +//
Tests of the host pool host selection policy implementation +func TestHostPoolHostPolicy(t *testing.T) { + policy := HostPoolHostPolicy(hostpool.New(nil)) + + hosts := []*HostInfo{ + {hostId: "0", connectAddress: net.IPv4(10, 0, 0, 0)}, + {hostId: "1", connectAddress: net.IPv4(10, 0, 0, 1)}, + } + + // Use SetHosts to control the ordering of the hosts; calling "AddHost" iterates a map, + // which would result in an unpredictable ordering + policy.(*hostPoolHostPolicy).SetHosts(hosts) + + // the first host selected is actually at [1], but this is ok for RR + // interleaved iteration should always increment the host + iter := policy.Pick(nil) + actualA := iter() + if actualA.Info().HostID() != "0" { + t.Errorf("Expected hosts[0] but was hosts[%s]", actualA.Info().HostID()) + } + actualA.Mark(nil) + + actualB := iter() + if actualB.Info().HostID() != "1" { + t.Errorf("Expected hosts[1] but was hosts[%s]", actualB.Info().HostID()) + } + actualB.Mark(fmt.Errorf("error")) + + actualC := iter() + if actualC.Info().HostID() != "0" { + t.Errorf("Expected hosts[0] but was hosts[%s]", actualC.Info().HostID()) + } + actualC.Mark(nil) + + actualD := iter() + if actualD.Info().HostID() != "0" { + t.Errorf("Expected hosts[0] but was hosts[%s]", actualD.Info().HostID()) + } + actualD.Mark(nil) +} + +func TestRoundRobinNilHostInfo(t *testing.T) { + policy := RoundRobinHostPolicy() + + host := &HostInfo{hostId: "host-1"} + policy.AddHost(host) + + iter := policy.Pick(nil) + next := iter() + if next == nil { + t.Fatal("got nil host") + } else if v := next.Info(); v == nil { + t.Fatal("got nil HostInfo") + } else if v.HostID() != host.HostID() { + t.Fatalf("expected host %v got %v", host, v) + } + + next = iter() + if next != nil { + t.Errorf("expected to get nil host got %+v", next) + if next.Info() == nil { + t.Fatalf("HostInfo is nil") + } + } +} + +func TestTokenAwareNilHostInfo(t *testing.T) { + policy := TokenAwareHostPolicy(RoundRobinHostPolicy()) + + hosts := [...]*HostInfo{ + {connectAddress: net.IPv4(10, 0, 0, 0), tokens: []string{"00"}}, + {connectAddress: net.IPv4(10, 0, 0, 1), tokens: []string{"25"}}, + {connectAddress: net.IPv4(10, 0, 0, 2), tokens: []string{"50"}}, + {connectAddress: net.IPv4(10, 0, 0, 3), tokens: []string{"75"}}, + } + for _, host := range hosts { + policy.AddHost(host) + } + policy.SetPartitioner("OrderedPartitioner") + + query := &Query{} + query.RoutingKey([]byte("20")) + + iter := policy.Pick(query) + next := iter() + if next == nil { + t.Fatal("got nil host") + } else if v := next.Info(); v == nil { + t.Fatal("got nil HostInfo") + } else if !v.ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Fatalf("expected peer 1 got %v", v.ConnectAddress()) + } + + // Empty the hosts to trigger the panic when using the fallback.
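+ // (with no hosts left, the fallback policy has nothing to return, so the iterator below is expected to yield nil rather than panic)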
+ for _, host := range hosts { + policy.RemoveHost(host) + } + + next = iter() + if next != nil { + t.Errorf("expected to get nil host got %+v", next) + if next.Info() == nil { + t.Fatalf("HostInfo is nil") + } + } +} + +func TestCOWList_Add(t *testing.T) { + var cow cowHostList + + toAdd := [...]net.IP{net.IPv4(10, 0, 0, 1), net.IPv4(10, 0, 0, 2), net.IPv4(10, 0, 0, 3)} + + for _, addr := range toAdd { + if !cow.add(&HostInfo{connectAddress: addr}) { + t.Fatal("did not add peer which was not in the set") + } + } + + hosts := cow.get() + if len(hosts) != len(toAdd) { + t.Fatalf("expected to have %d hosts got %d", len(toAdd), len(hosts)) + } + + set := make(map[string]bool) + for _, host := range hosts { + set[string(host.ConnectAddress())] = true + } + + for _, addr := range toAdd { + if !set[string(addr)] { + t.Errorf("addr was not in the host list: %q", addr) + } + } +} + +// TestSimpleRetryPolicy makes sure that we only allow 1 + numRetries attempts +func TestSimpleRetryPolicy(t *testing.T) { + q := &Query{} + + // this should allow a total of 3 tries. + rt := &SimpleRetryPolicy{NumRetries: 2} + + cases := []struct { + attempts int + allow bool + }{ + {0, true}, + {1, true}, + {2, true}, + {3, false}, + {4, false}, + {5, false}, + } + + for _, c := range cases { + q.attempts = c.attempts + if c.allow && !rt.Attempt(q) { + t.Fatalf("should allow retry after %d attempts", c.attempts) + } + if !c.allow && rt.Attempt(q) { + t.Fatalf("should not allow retry after %d attempts", c.attempts) + } + } +} + +func TestExponentialBackoffPolicy(t *testing.T) { + // test with defaults + sut := &ExponentialBackoffRetryPolicy{NumRetries: 2} + + // the expected delay doubles per attempt (100ms * 2^(attempts-1)); the checks below allow +/-50ms of jitter around each value + cases := []struct { + attempts int + delay time.Duration + }{ + + {1, 100 * time.Millisecond}, + {2, (2) * 100 * time.Millisecond}, + {3, (2 * 2) * 100 * time.Millisecond}, + {4, (2 * 2 * 2) * 100 * time.Millisecond}, + } + for _, c := range cases { + // test 100 times for each case + for i := 0; i < 100; i++ { + d := sut.napTime(c.attempts) + if d < c.delay-(100*time.Millisecond)/2 { + t.Fatalf("Delay %d less than jitter min of %d", d, c.delay-100*time.Millisecond/2) + } + if d > c.delay+(100*time.Millisecond)/2 { + t.Fatalf("Delay %d greater than jitter max of %d", d, c.delay+100*time.Millisecond/2) + } + } + } +} + +func TestDCAwareRR(t *testing.T) { + p := DCAwareRoundRobinPolicy("local") + p.AddHost(&HostInfo{connectAddress: net.ParseIP("10.0.0.1"), dataCenter: "local"}) + p.AddHost(&HostInfo{connectAddress: net.ParseIP("10.0.0.2"), dataCenter: "remote"}) + + iter := p.Pick(nil) + + h := iter() + if h.Info().DataCenter() != "local" { + t.Fatalf("expected to get local DC first, got %v", h.Info()) + } + h = iter() + if h.Info().DataCenter() != "remote" { + t.Fatalf("expected to get remote DC, got %v", h.Info()) + } +} diff --git a/vendor/github.com/gocql/gocql/ring_test.go b/vendor/github.com/gocql/gocql/ring_test.go new file mode 100644 index 0000000000..88e593a32e --- /dev/null +++ b/vendor/github.com/gocql/gocql/ring_test.go @@ -0,0 +1,38 @@ +package gocql + +import ( + "net" + "testing" +) + +func TestRing_AddHostIfMissing_Missing(t *testing.T) { + ring := &ring{} + + host := &HostInfo{connectAddress: net.IPv4(1, 1, 1, 1)} + h1, ok := ring.addHostIfMissing(host) + if ok { + t.Fatal("host was reported as already existing") + } else if !h1.Equal(host) { + t.Fatalf("returned hosts not equal: %v != %v", h1, host) + } else if h1 != host { + t.Fatalf("returned host is not the same pointer: %p != %p", h1, host) + } +} + +func TestRing_AddHostIfMissing_Existing(t
*testing.T) { + ring := &ring{} + + host := &HostInfo{connectAddress: net.IPv4(1, 1, 1, 1)} + ring.addHostIfMissing(host) + + h2 := &HostInfo{connectAddress: net.IPv4(1, 1, 1, 1)} + + h1, ok := ring.addHostIfMissing(h2) + if !ok { + t.Fatal("host was not reported as already existing") + } else if !h1.Equal(host) { + t.Fatalf("returned hosts not equal: %v != %v", h1, host) + } else if h1 != host { + t.Fatalf("returned host is not the same pointer: %p != %p", h1, host) + } +} diff --git a/vendor/github.com/gocql/gocql/session_connect_test.go b/vendor/github.com/gocql/gocql/session_connect_test.go new file mode 100644 index 0000000000..9d0539f8c7 --- /dev/null +++ b/vendor/github.com/gocql/gocql/session_connect_test.go @@ -0,0 +1,131 @@ +package gocql + +import ( + "context" + "net" + "strconv" + "sync" + "testing" + "time" +) + +type OneConnTestServer struct { + Err error + Addr net.IP + Port int + + listener net.Listener + acceptChan chan struct{} + mu sync.Mutex + closed bool +} + +func NewOneConnTestServer() (*OneConnTestServer, error) { + lstn, err := net.Listen("tcp4", "localhost:0") + if err != nil { + return nil, err + } + addr, port := parseAddressPort(lstn.Addr().String()) + return &OneConnTestServer{ + listener: lstn, + acceptChan: make(chan struct{}), + Addr: addr, + Port: port, + }, nil +} + +func (c *OneConnTestServer) Accepted() chan struct{} { + return c.acceptChan +} + +func (c *OneConnTestServer) Close() { + c.lockedClose() +} + +func (c *OneConnTestServer) Serve() { + conn, err := c.listener.Accept() + c.Err = err + if conn != nil { + conn.Close() + } + c.lockedClose() +} + +func (c *OneConnTestServer) lockedClose() { + c.mu.Lock() + defer c.mu.Unlock() + if !c.closed { + close(c.acceptChan) + c.listener.Close() + c.closed = true + } +} + +func parseAddressPort(hostPort string) (net.IP, int) { + host, portStr, err := net.SplitHostPort(hostPort) + if err != nil { + return net.ParseIP(""), 0 + } + port, _ := strconv.Atoi(portStr) + return net.ParseIP(host), port +} + +func testConnErrorHandler(t *testing.T) ConnErrorHandler { + return connErrorHandlerFn(func(conn *Conn, err error, closed bool) { + t.Errorf("in connection handler: %v", err) + }) +} + +func assertConnectionEventually(t *testing.T, wait time.Duration, srvr *OneConnTestServer) { + ctx, cancel := context.WithTimeout(context.Background(), wait) + defer cancel() + + select { + case <-ctx.Done(): + if ctx.Err() != nil { + t.Errorf("waiting for connection: %v", ctx.Err()) + } + case <-srvr.Accepted(): + if srvr.Err != nil { + t.Errorf("accepting connection: %v", srvr.Err) + } + } +} + +func TestSession_connect_WithNoTranslator(t *testing.T) { + srvr, err := NewOneConnTestServer() + assertNil(t, "error when creating tcp server", err) + defer srvr.Close() + + session := createTestSession() + defer session.Close() + + go srvr.Serve() + + Connect(&HostInfo{ + connectAddress: srvr.Addr, + port: srvr.Port, + }, session.connCfg, testConnErrorHandler(t), session) + + assertConnectionEventually(t, 500*time.Millisecond, srvr) +} + +func TestSession_connect_WithTranslator(t *testing.T) { + srvr, err := NewOneConnTestServer() + assertNil(t, "error when creating tcp server", err) + defer srvr.Close() + + session := createTestSession() + defer session.Close() + session.cfg.AddressTranslator = staticAddressTranslator(srvr.Addr, srvr.Port) + + go srvr.Serve() + + // the provided address will be translated + Connect(&HostInfo{ + connectAddress: net.ParseIP("10.10.10.10"), + port: 5432, + }, session.connCfg, testConnErrorHandler(t),
session) + + assertConnectionEventually(t, 500*time.Millisecond, srvr) +} diff --git a/vendor/github.com/gocql/gocql/session_test.go b/vendor/github.com/gocql/gocql/session_test.go new file mode 100644 index 0000000000..eb7035f09e --- /dev/null +++ b/vendor/github.com/gocql/gocql/session_test.go @@ -0,0 +1,252 @@ +// +build all integration + +package gocql + +import ( + "fmt" + "testing" +) + +func TestSessionAPI(t *testing.T) { + cfg := &ClusterConfig{} + + s := &Session{ + cfg: *cfg, + cons: Quorum, + policy: RoundRobinHostPolicy(), + } + + s.pool = cfg.PoolConfig.buildPool(s) + s.executor = &queryExecutor{ + pool: s.pool, + policy: s.policy, + } + defer s.Close() + + s.SetConsistency(All) + if s.cons != All { + t.Fatalf("expected consistency 'All', got '%v'", s.cons) + } + + s.SetPageSize(100) + if s.pageSize != 100 { + t.Fatalf("expected pageSize 100, got %v", s.pageSize) + } + + s.SetPrefetch(0.75) + if s.prefetch != 0.75 { + t.Fatalf("expected prefetch 0.75, got %v", s.prefetch) + } + + trace := &traceWriter{} + + s.SetTrace(trace) + if s.trace != trace { + t.Fatalf("expected traceWriter '%v', got '%v'", trace, s.trace) + } + + qry := s.Query("test", 1) + if v, ok := qry.values[0].(int); !ok { + t.Fatalf("expected qry.values[0] to be an int, got %v", qry.values[0]) + } else if v != 1 { + t.Fatalf("expected qry.values[0] to be 1, got %v", v) + } else if qry.stmt != "test" { + t.Fatalf("expected qry.stmt to be 'test', got '%v'", qry.stmt) + } + + boundQry := s.Bind("test", func(q *QueryInfo) ([]interface{}, error) { + return nil, nil + }) + if boundQry.binding == nil { + t.Fatal("expected qry.binding to be defined, got nil") + } else if boundQry.stmt != "test" { + t.Fatalf("expected qry.stmt to be 'test', got '%v'", boundQry.stmt) + } + + itr := s.executeQuery(qry) + if itr.err != ErrNoConnections { + t.Fatalf("expected itr.err to be '%v', got '%v'", ErrNoConnections, itr.err) + } + + testBatch := s.NewBatch(LoggedBatch) + testBatch.Query("test") + err := s.ExecuteBatch(testBatch) + + if err != ErrNoConnections { + t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrNoConnections, err) + } + + s.Close() + if !s.Closed() { + t.Fatal("expected s.Closed() to be true, got false") + } + // Should just return cleanly + s.Close() + + err = s.ExecuteBatch(testBatch) + if err != ErrSessionClosed { + t.Fatalf("expected session.ExecuteBatch to return '%v', got '%v'", ErrSessionClosed, err) + } +} + +func TestQueryBasicAPI(t *testing.T) { + qry := &Query{} + + if qry.Latency() != 0 { + t.Fatalf("expected Query.Latency() to return 0, got %v", qry.Latency()) + } + + qry.attempts = 2 + qry.totalLatency = 4 + if qry.Attempts() != 2 { + t.Fatalf("expected Query.Attempts() to return 2, got %v", qry.Attempts()) + } + if qry.Latency() != 2 { + t.Fatalf("expected Query.Latency() to return 2, got %v", qry.Latency()) + } + + qry.Consistency(All) + if qry.GetConsistency() != All { + t.Fatalf("expected Query.GetConsistency to return 'All', got '%s'", qry.GetConsistency()) + } + + trace := &traceWriter{} + qry.Trace(trace) + if qry.trace != trace { + t.Fatalf("expected Query.Trace to be '%v', got '%v'", trace, qry.trace) + } + + qry.PageSize(10) + if qry.pageSize != 10 { + t.Fatalf("expected Query.PageSize to be 10, got %v", qry.pageSize) + } + + qry.Prefetch(0.75) + if qry.prefetch != 0.75 { + t.Fatalf("expected Query.Prefetch to be 0.75, got %v", qry.prefetch) + } + + rt := &SimpleRetryPolicy{NumRetries: 3} + if qry.RetryPolicy(rt); qry.rt != rt { + t.Fatalf("expected Query.RetryPolicy to be
'%v', got '%v'", rt, qry.rt) + } + + qry.Bind(qry) + if qry.values[0] != qry { + t.Fatalf("expected Query.Values[0] to be '%v', got '%v'", qry, qry.values[0]) + } +} + +func TestQueryShouldPrepare(t *testing.T) { + toPrepare := []string{"select * ", "INSERT INTO", "update table", "delete from", "begin batch"} + cantPrepare := []string{"create table", "USE table", "LIST keyspaces", "alter table", "drop table", "grant user", "revoke user"} + q := &Query{} + + for i := 0; i < len(toPrepare); i++ { + q.stmt = toPrepare[i] + if !q.shouldPrepare() { + t.Fatalf("expected Query.shouldPrepare to return true, got false for statement '%v'", toPrepare[i]) + } + } + + for i := 0; i < len(cantPrepare); i++ { + q.stmt = cantPrepare[i] + if q.shouldPrepare() { + t.Fatalf("expected Query.shouldPrepare to return false, got true for statement '%v'", cantPrepare[i]) + } + } +} + +func TestBatchBasicAPI(t *testing.T) { + + cfg := &ClusterConfig{RetryPolicy: &SimpleRetryPolicy{NumRetries: 2}} + + s := &Session{ + cfg: *cfg, + cons: Quorum, + } + defer s.Close() + + s.pool = cfg.PoolConfig.buildPool(s) + + b := s.NewBatch(UnloggedBatch) + if b.Type != UnloggedBatch { + t.Fatalf("expceted batch.Type to be '%v', got '%v'", UnloggedBatch, b.Type) + } else if b.rt != cfg.RetryPolicy { + t.Fatalf("expceted batch.RetryPolicy to be '%v', got '%v'", cfg.RetryPolicy, b.rt) + } + + b = NewBatch(LoggedBatch) + if b.Type != LoggedBatch { + t.Fatalf("expected batch.Type to be '%v', got '%v'", LoggedBatch, b.Type) + } + + b.attempts = 1 + if b.Attempts() != 1 { + t.Fatalf("expceted batch.Attempts() to return %v, got %v", 1, b.Attempts()) + } + + if b.Latency() != 0 { + t.Fatalf("expected batch.Latency() to be 0, got %v", b.Latency()) + } + + b.totalLatency = 4 + if b.Latency() != 4 { + t.Fatalf("expected batch.Latency() to return %v, got %v", 4, b.Latency()) + } + + b.Cons = One + if b.GetConsistency() != One { + t.Fatalf("expected batch.GetConsistency() to return 'One', got '%s'", b.GetConsistency()) + } + + b.Query("test", 1) + if b.Entries[0].Stmt != "test" { + t.Fatalf("expected batch.Entries[0].Stmt to be 'test', got '%v'", b.Entries[0].Stmt) + } else if b.Entries[0].Args[0].(int) != 1 { + t.Fatalf("expected batch.Entries[0].Args[0] to be 1, got %v", b.Entries[0].Args[0]) + } + + b.Bind("test2", func(q *QueryInfo) ([]interface{}, error) { + return nil, nil + }) + + if b.Entries[1].Stmt != "test2" { + t.Fatalf("expected batch.Entries[1].Stmt to be 'test2', got '%v'", b.Entries[1].Stmt) + } else if b.Entries[1].binding == nil { + t.Fatal("expected batch.Entries[1].binding to be defined, got nil") + } + r := &SimpleRetryPolicy{NumRetries: 4} + + b.RetryPolicy(r) + if b.rt != r { + t.Fatalf("expected batch.RetryPolicy to be '%v', got '%v'", r, b.rt) + } + + if b.Size() != 2 { + t.Fatalf("expected batch.Size() to return 2, got %v", b.Size()) + } + +} + +func TestConsistencyNames(t *testing.T) { + names := map[fmt.Stringer]string{ + Any: "ANY", + One: "ONE", + Two: "TWO", + Three: "THREE", + Quorum: "QUORUM", + All: "ALL", + LocalQuorum: "LOCAL_QUORUM", + EachQuorum: "EACH_QUORUM", + Serial: "SERIAL", + LocalSerial: "LOCAL_SERIAL", + LocalOne: "LOCAL_ONE", + } + + for k, v := range names { + if k.String() != v { + t.Fatalf("expected '%v', got '%v'", v, k.String()) + } + } +} diff --git a/vendor/github.com/gocql/gocql/stress_test.go b/vendor/github.com/gocql/gocql/stress_test.go new file mode 100644 index 0000000000..e4abac5169 --- /dev/null +++ b/vendor/github.com/gocql/gocql/stress_test.go @@ -0,0 +1,70 @@ +// +build all 
integration + +package gocql + +import ( + "sync/atomic" + + "testing" +) + +func BenchmarkConnStress(b *testing.B) { + const workers = 16 + + cluster := createCluster() + cluster.NumConns = 1 + session := createSessionFromCluster(cluster, b) + defer session.Close() + + if err := createTable(session, "CREATE TABLE IF NOT EXISTS conn_stress (id int primary key)"); err != nil { + b.Fatal(err) + } + + var seed uint64 + writer := func(pb *testing.PB) { + seed := atomic.AddUint64(&seed, 1) + var i uint64 = 0 + for pb.Next() { + if err := session.Query("insert into conn_stress (id) values (?)", i*seed).Exec(); err != nil { + b.Error(err) + return + } + i++ + } + } + + b.SetParallelism(workers) + b.RunParallel(writer) +} + +func BenchmarkConnRoutingKey(b *testing.B) { + const workers = 16 + + cluster := createCluster() + cluster.NumConns = 1 + cluster.PoolConfig.HostSelectionPolicy = TokenAwareHostPolicy(RoundRobinHostPolicy()) + session := createSessionFromCluster(cluster, b) + defer session.Close() + + if err := createTable(session, "CREATE TABLE IF NOT EXISTS routing_key_stress (id int primary key)"); err != nil { + b.Fatal(err) + } + + var seed uint64 + writer := func(pb *testing.PB) { + seed := atomic.AddUint64(&seed, 1) + var i uint64 = 0 + query := session.Query("insert into routing_key_stress (id) values (?)") + + for pb.Next() { + if _, err := query.Bind(i * seed).GetRoutingKey(); err != nil { + b.Error(err) + return + } + i++ + } + } + + b.SetParallelism(workers) + b.RunParallel(writer) +} diff --git a/vendor/github.com/gocql/gocql/token_test.go b/vendor/github.com/gocql/gocql/token_test.go new file mode 100644 index 0000000000..b71ff74cc7 --- /dev/null +++ b/vendor/github.com/gocql/gocql/token_test.go @@ -0,0 +1,335 @@ +// Copyright (c) 2015 The gocql Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gocql + +import ( + "bytes" + "fmt" + "math/big" + "net" + "sort" + "strconv" + "testing" +) + +// Tests of the murmur3Partitioner +func TestMurmur3Partitioner(t *testing.T) { + token := murmur3Partitioner{}.ParseString("-1053604476080545076") + + if "-1053604476080545076" != token.String() { + t.Errorf("Expected '-1053604476080545076' but was '%s'", token) + } + + // at least verify that the partitioner + // doesn't return nil + pk, _ := marshalInt(nil, 1) + token = murmur3Partitioner{}.Hash(pk) + if token == nil { + t.Fatal("token was nil") + } +} + +// Tests of the murmur3Token +func TestMurmur3Token(t *testing.T) { + if murmur3Token(42).Less(murmur3Token(42)) { + t.Errorf("Expected Less to return false, but was true") + } + if !murmur3Token(-42).Less(murmur3Token(42)) { + t.Errorf("Expected Less to return true, but was false") + } + if murmur3Token(42).Less(murmur3Token(-42)) { + t.Errorf("Expected Less to return false, but was true") + } +} + +// Tests of the orderedPartitioner +func TestOrderedPartitioner(t *testing.T) { + // at least verify that the partitioner + // doesn't return nil + p := orderedPartitioner{} + pk, _ := marshalInt(nil, 1) + token := p.Hash(pk) + if token == nil { + t.Fatal("token was nil") + } + + str := token.String() + parsedToken := p.ParseString(str) + + if !bytes.Equal([]byte(token.(orderedToken)), []byte(parsedToken.(orderedToken))) { + t.Errorf("Failed to convert to and from a string %s expected %x but was %x", + str, + []byte(token.(orderedToken)), + []byte(parsedToken.(orderedToken)), + ) + } +} + +// Tests of the orderedToken +func TestOrderedToken(t *testing.T) { + if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 4, 2})) { + t.Errorf("Expected Less to return false, but was true") + } + if !orderedToken([]byte{0, 0, 3}).Less(orderedToken([]byte{0, 0, 4, 2})) { + t.Errorf("Expected Less to return true, but was false") + } + if orderedToken([]byte{0, 0, 4, 2}).Less(orderedToken([]byte{0, 0, 3})) { + t.Errorf("Expected Less to return false, but was true") + } +} + +// Tests of the randomPartitioner +func TestRandomPartitioner(t *testing.T) { + // at least verify that the partitioner + // doesn't return nil + p := randomPartitioner{} + pk, _ := marshalInt(nil, 1) + token := p.Hash(pk) + if token == nil { + t.Fatal("token was nil") + } + + str := token.String() + parsedToken := p.ParseString(str) + + if (*big.Int)(token.(*randomToken)).Cmp((*big.Int)(parsedToken.(*randomToken))) != 0 { + t.Errorf("Failed to convert to and from a string %s expected %v but was %v", + str, + token, + parsedToken, + ) + } +} + +func TestRandomPartitionerMatchesReference(t *testing.T) { + // example taken from datastax python driver + // >>> from cassandra.metadata import MD5Token + // >>> MD5Token.hash_fn("test") + // 12707736894140473154801792860916528374L + var p randomPartitioner + expect := "12707736894140473154801792860916528374" + actual := p.Hash([]byte("test")).String() + if actual != expect { + t.Errorf("expected random partitioner to generate tokens in the same way as the reference"+ " python client.
Expected %s, but got %s", expect, actual) + } +} + +// Tests of the randomToken +func TestRandomToken(t *testing.T) { + if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(42))) { + t.Errorf("Expected Less to return false, but was true") + } + if !((*randomToken)(big.NewInt(41))).Less((*randomToken)(big.NewInt(42))) { + t.Errorf("Expected Less to return true, but was false") + } + if ((*randomToken)(big.NewInt(42))).Less((*randomToken)(big.NewInt(41))) { + t.Errorf("Expected Less to return false, but was true") + } +} + +type intToken int + +func (i intToken) String() string { + return strconv.Itoa(int(i)) +} + +func (i intToken) Less(token token) bool { + return i < token.(intToken) +} + +// Test of the token ring implementation based on example at the start of this +// page of documentation: +// http://www.datastax.com/docs/0.8/cluster_architecture/partitioning +func TestIntTokenRing(t *testing.T) { + host0 := &HostInfo{} + host25 := &HostInfo{} + host50 := &HostInfo{} + host75 := &HostInfo{} + ring := &tokenRing{ + partitioner: nil, + // these tokens and hosts are out of order to test sorting + tokens: []token{ + intToken(0), + intToken(50), + intToken(75), + intToken(25), + }, + hosts: []*HostInfo{ + host0, + host50, + host75, + host25, + }, + } + + sort.Sort(ring) + + if ring.GetHostForToken(intToken(0)) != host0 { + t.Error("Expected host 0 for token 0") + } + if ring.GetHostForToken(intToken(1)) != host25 { + t.Error("Expected host 25 for token 1") + } + if ring.GetHostForToken(intToken(24)) != host25 { + t.Error("Expected host 25 for token 24") + } + if ring.GetHostForToken(intToken(25)) != host25 { + t.Error("Expected host 25 for token 25") + } + if ring.GetHostForToken(intToken(26)) != host50 { + t.Error("Expected host 50 for token 26") + } + if ring.GetHostForToken(intToken(49)) != host50 { + t.Error("Expected host 50 for token 49") + } + if ring.GetHostForToken(intToken(50)) != host50 { + t.Error("Expected host 50 for token 50") + } + if ring.GetHostForToken(intToken(51)) != host75 { + t.Error("Expected host 75 for token 51") + } + if ring.GetHostForToken(intToken(74)) != host75 { + t.Error("Expected host 75 for token 74") + } + if ring.GetHostForToken(intToken(75)) != host75 { + t.Error("Expected host 75 for token 75") + } + if ring.GetHostForToken(intToken(76)) != host0 { + t.Error("Expected host 0 for token 76") + } + if ring.GetHostForToken(intToken(99)) != host0 { + t.Error("Expected host 0 for token 99") + } + if ring.GetHostForToken(intToken(100)) != host0 { + t.Error("Expected host 0 for token 100") + } +} + +// Test for the behavior of a nil pointer to tokenRing +func TestNilTokenRing(t *testing.T) { + var ring *tokenRing = nil + + if ring.GetHostForToken(nil) != nil { + t.Error("Expected nil for nil token ring") + } + if ring.GetHostForPartitionKey(nil) != nil { + t.Error("Expected nil for nil token ring") + } +} + +// Test of the recognition of the partitioner class +func TestUnknownTokenRing(t *testing.T) { + _, err := newTokenRing("UnknownPartitioner", nil) + if err == nil { + t.Error("Expected error for unknown partitioner value, but was nil") + } +} + +func hostsForTests(n int) []*HostInfo { + hosts := make([]*HostInfo, n) + for i := 0; i < n; i++ { + host := &HostInfo{ + connectAddress: net.IPv4(1, 1, 1, byte(i)), + tokens: []string{fmt.Sprintf("%d", i)}, + } + + hosts[i] = host + } + return hosts +} + +// Test of the tokenRing with the Murmur3Partitioner +func TestMurmur3TokenRing(t *testing.T) { + // Note, strings are parsed directly to int64,
they are not murmur3 hashed + hosts := hostsForTests(4) + ring, err := newTokenRing("Murmur3Partitioner", hosts) + if err != nil { + t.Fatalf("Failed to create token ring due to error: %v", err) + } + + p := murmur3Partitioner{} + + for _, host := range hosts { + actual := ring.GetHostForToken(p.ParseString(host.tokens[0])) + if !actual.ConnectAddress().Equal(host.ConnectAddress()) { + t.Errorf("Expected address %v for token %q, but was %v", host.ConnectAddress(), + host.tokens[0], actual.ConnectAddress()) + } + } + + actual := ring.GetHostForToken(p.ParseString("12")) + if !actual.ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Errorf("Expected address 1 for token \"12\", but was %s", actual.ConnectAddress()) + } + + actual = ring.GetHostForToken(p.ParseString("24324545443332")) + if !actual.ConnectAddress().Equal(hosts[0].ConnectAddress()) { + t.Errorf("Expected address 0 for token \"24324545443332\", but was %s", actual.ConnectAddress()) + } +} + +// Test of the tokenRing with the OrderedPartitioner +func TestOrderedTokenRing(t *testing.T) { + // Tokens here follow more or less the same layout as the int tokens above, since + // each numeric character translates to a consistently offset byte. + hosts := hostsForTests(4) + ring, err := newTokenRing("OrderedPartitioner", hosts) + if err != nil { + t.Fatalf("Failed to create token ring due to error: %v", err) + } + + p := orderedPartitioner{} + + var actual *HostInfo + for _, host := range hosts { + actual = ring.GetHostForToken(p.ParseString(host.tokens[0])) + if !actual.ConnectAddress().Equal(host.ConnectAddress()) { + t.Errorf("Expected address %v for token %q, but was %v", host.ConnectAddress(), + host.tokens[0], actual.ConnectAddress()) + } + } + + actual = ring.GetHostForToken(p.ParseString("12")) + if !actual.ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Errorf("Expected address 1 for token \"12\", but was %s", actual.ConnectAddress()) + } + + actual = ring.GetHostForToken(p.ParseString("24324545443332")) + if !actual.ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Errorf("Expected address 1 for token \"24324545443332\", but was %s", actual.ConnectAddress()) + } +} + +// Test of the tokenRing with the RandomPartitioner +func TestRandomTokenRing(t *testing.T) { + // String tokens are parsed into big.Int in base 10 + hosts := hostsForTests(4) + ring, err := newTokenRing("RandomPartitioner", hosts) + if err != nil { + t.Fatalf("Failed to create token ring due to error: %v", err) + } + + p := randomPartitioner{} + + var actual *HostInfo + for _, host := range hosts { + actual = ring.GetHostForToken(p.ParseString(host.tokens[0])) + if !actual.ConnectAddress().Equal(host.ConnectAddress()) { + t.Errorf("Expected address %v for token %q, but was %v", host.ConnectAddress(), + host.tokens[0], actual.ConnectAddress()) + } + } + + actual = ring.GetHostForToken(p.ParseString("12")) + if !actual.ConnectAddress().Equal(hosts[1].ConnectAddress()) { + t.Errorf("Expected address 1 for token \"12\", but was %s", actual.ConnectAddress()) + } + + actual = ring.GetHostForToken(p.ParseString("24324545443332")) + if !actual.ConnectAddress().Equal(hosts[0].ConnectAddress()) { + t.Errorf("Expected address 0 for token \"24324545443332\", but was %s", actual.ConnectAddress()) + } +} diff --git a/vendor/github.com/gocql/gocql/topology_test.go b/vendor/github.com/gocql/gocql/topology_test.go new file mode 100644 index 0000000000..8384824ab7 --- /dev/null +++ b/vendor/github.com/gocql/gocql/topology_test.go @@ -0,0 +1,51 @@ +// +build all unit + +package gocql + +import ( + "testing" +)
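+ // note: the "+build all unit" tag above keeps these tests out of default builds; they exercise the RoundRobin API purely in-memory, so no live cluster is required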
+ +// fakeNode is used as a simple structure to test the RoundRobin API +type fakeNode struct { + conn *Conn + closed bool +} + +// Pick is needed to satisfy the Node interface +func (n *fakeNode) Pick(qry *Query) *Conn { + if n.conn == nil { + n.conn = &Conn{} + } + return n.conn +} + +// Close is needed to satisfy the Node interface +func (n *fakeNode) Close() { + n.closed = true +} + +// TestRoundRobinAPI tests the exported methods of the RoundRobin struct +// to make sure the API behaves accordingly. +func TestRoundRobinAPI(t *testing.T) { + node := &fakeNode{} + rr := NewRoundRobin() + rr.AddNode(node) + + if rr.Size() != 1 { + t.Fatalf("expected size to be 1, got %v", rr.Size()) + } + + if c := rr.Pick(nil); c != node.conn { + t.Fatalf("expected conn %v, got %v", node.conn, c) + } + + rr.Close() + if rr.pool != nil { + t.Fatalf("expected rr.pool to be nil, got %v", rr.pool) + } + + if !node.closed { + t.Fatal("expected node.closed to be true, got false") + } +} diff --git a/vendor/github.com/gocql/gocql/tuple_test.go b/vendor/github.com/gocql/gocql/tuple_test.go new file mode 100644 index 0000000000..1add606c53 --- /dev/null +++ b/vendor/github.com/gocql/gocql/tuple_test.go @@ -0,0 +1,127 @@ +// +build all integration + +package gocql + +import ( + "reflect" + "testing" +) + +func TestTupleSimple(t *testing.T) { + session := createSession(t) + defer session.Close() + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("tuple types are only available with proto >= 3") + } + + err := createTable(session, `CREATE TABLE gocql_test.tuple_test( + id int, + coord frozen<tuple<int, int>>, + + primary key(id))`) + if err != nil { + t.Fatal(err) + } + + err = session.Query("INSERT INTO tuple_test(id, coord) VALUES(?, (?, ?))", 1, 100, -100).Exec() + if err != nil { + t.Fatal(err) + } + + var ( + id int + coord struct { + x int + y int + } + ) + + iter := session.Query("SELECT id, coord FROM tuple_test WHERE id=?", 1) + if err := iter.Scan(&id, &coord.x, &coord.y); err != nil { + t.Fatal(err) + } + + if id != 1 { + t.Errorf("expected to get id=1 got: %v", id) + } + if coord.x != 100 { + t.Errorf("expected to get coord.x=100 got: %v", coord.x) + } + if coord.y != -100 { + t.Errorf("expected to get coord.y=-100 got: %v", coord.y) + } +} + +func TestTupleMapScan(t *testing.T) { + session := createSession(t) + defer session.Close() + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("tuple types are only available with proto >= 3") + } + + err := createTable(session, `CREATE TABLE gocql_test.tuple_map_scan( + id int, + val frozen<tuple<int, int>>, + + primary key(id))`) + if err != nil { + t.Fatal(err) + } + + if err := session.Query(`INSERT INTO tuple_map_scan (id, val) VALUES (1, (1, 2));`).Exec(); err != nil { + t.Fatal(err) + } + + m := make(map[string]interface{}) + err = session.Query(`SELECT * FROM tuple_map_scan`).MapScan(m) + if err != nil { + t.Fatal(err) + } +} + +func TestTuple_NestedCollection(t *testing.T) { + session := createSession(t) + defer session.Close() + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("tuple types are only available with proto >= 3") + } + + err := createTable(session, `CREATE TABLE gocql_test.nested_tuples( + id int, + val list<frozen<tuple<int, text>>>, + + primary key(id))`) + if err != nil { + t.Fatal(err) + } + + type typ struct { + A int + B string + } + + tests := []struct { + name string + val interface{} + }{ + {name: "slice", val: [][]interface{}{{1, "2"}, {3, "4"}}}, + {name: "array", val: [][2]interface{}{{1, "2"}, {3, "4"}}}, + {name: "struct", val: []typ{{1, "2"}, {3, "4"}}}, + } + + for i, test :=
range tests { + t.Run(test.name, func(t *testing.T) { + if err := session.Query(`INSERT INTO nested_tuples (id, val) VALUES (?, ?);`, i, test.val).Exec(); err != nil { + t.Fatal(err) + } + + rv := reflect.ValueOf(test.val) + res := reflect.New(rv.Type()).Elem().Addr().Interface() + + err = session.Query(`SELECT val FROM nested_tuples WHERE id=?`, i).Scan(res) + if err != nil { + t.Fatal(err) + } + }) + } +} diff --git a/vendor/github.com/gocql/gocql/udt_test.go b/vendor/github.com/gocql/gocql/udt_test.go new file mode 100644 index 0000000000..f36fd6fdf9 --- /dev/null +++ b/vendor/github.com/gocql/gocql/udt_test.go @@ -0,0 +1,503 @@ +// +build all integration + +package gocql + +import ( + "fmt" + "strings" + "testing" + "time" +) + +type position struct { + Lat int `cql:"lat"` + Lon int `cql:"lon"` + Padding string `json:"padding"` +} + +// NOTE: due to current implementation details it is not currently possible to use +// a pointer receiver type for the UDTMarshaler interface to handle UDTs +func (p position) MarshalUDT(name string, info TypeInfo) ([]byte, error) { + switch name { + case "lat": + return Marshal(info, p.Lat) + case "lon": + return Marshal(info, p.Lon) + case "padding": + return Marshal(info, p.Padding) + default: + return nil, fmt.Errorf("unknown column for position: %q", name) + } +} + +func (p *position) UnmarshalUDT(name string, info TypeInfo, data []byte) error { + switch name { + case "lat": + return Unmarshal(info, data, &p.Lat) + case "lon": + return Unmarshal(info, data, &p.Lon) + case "padding": + return Unmarshal(info, data, &p.Padding) + default: + return fmt.Errorf("unknown column for position: %q", name) + } +} + +func TestUDT_Marshaler(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDTs are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.position( + lat int, + lon int, + padding text);`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.houses( + id int, + name text, + loc frozen<position>, + + primary key(id) + );`) + if err != nil { + t.Fatal(err) + } + + const ( + expLat = -1 + expLon = 2 + ) + pad := strings.Repeat("X", 1000) + + err = session.Query("INSERT INTO houses(id, name, loc) VALUES(?, ?, ?)", 1, "test", &position{expLat, expLon, pad}).Exec() + if err != nil { + t.Fatal(err) + } + + pos := &position{} + + err = session.Query("SELECT loc FROM houses WHERE id = ?", 1).Scan(pos) + if err != nil { + t.Fatal(err) + } + + if pos.Lat != expLat { + t.Errorf("expected lat to be %d got %d", expLat, pos.Lat) + } + if pos.Lon != expLon { + t.Errorf("expected lon to be %d got %d", expLon, pos.Lon) + } + if pos.Padding != pad { + t.Errorf("expected to get padding %q got %q\n", pad, pos.Padding) + } +} + +func TestUDT_Reflect(t *testing.T) { + // Uses reflection instead of implementing the marshaling type + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDTs are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.horse( + name text, + owner text);`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.horse_race( + position int, + horse frozen<horse>, + + primary key(position) + );`) + if err != nil { + t.Fatal(err) + } + + type horse struct { + Name string `cql:"name"` + Owner string `cql:"owner"` + } + + insertedHorse := &horse{ + Name: "pony", + Owner:
"jim", + } + + err = session.Query("INSERT INTO horse_race(position, horse) VALUES(?, ?)", 1, insertedHorse).Exec() + if err != nil { + t.Fatal(err) + } + + retrievedHorse := &horse{} + err = session.Query("SELECT horse FROM horse_race WHERE position = ?", 1).Scan(retrievedHorse) + if err != nil { + t.Fatal(err) + } + + if *retrievedHorse != *insertedHorse { + t.Fatalf("expected to get %+v got %+v", insertedHorse, retrievedHorse) + } +} + +func TestUDT_Proto2error(t *testing.T) { + // TODO(zariel): move this to marshal test? + _, err := Marshal(NativeType{custom: "org.apache.cassandra.db.marshal.UserType.Type", proto: 2}, 1) + if err != ErrorUDTUnavailable { + t.Fatalf("expected %v got %v", ErrUnavailable, err) + } +} + +func TestUDT_NullObject(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDT are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.udt_null_type( + name text, + owner text);`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.udt_null_table( + id uuid, + udt_col frozen, + + primary key(id) + );`) + if err != nil { + t.Fatal(err) + } + + type col struct { + Name string `cql:"name"` + Owner string `cql:"owner"` + } + + id := TimeUUID() + err = session.Query("INSERT INTO udt_null_table(id) VALUES(?)", id).Exec() + if err != nil { + t.Fatal(err) + } + + readCol := &col{ + Name: "temp", + Owner: "temp", + } + + err = session.Query("SELECT udt_col FROM udt_null_table WHERE id = ?", id).Scan(readCol) + if err != nil { + t.Fatal(err) + } + + if readCol.Name != "" { + t.Errorf("expected empty string to be returned for null udt: got %q", readCol.Name) + } + if readCol.Owner != "" { + t.Errorf("expected empty string to be returned for null udt: got %q", readCol.Owner) + } +} + +func TestMapScanUDT(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDT are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.log_entry ( + created_timestamp timestamp, + message text + );`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.requests_by_id ( + id uuid PRIMARY KEY, + type int, + log_entries list> + );`) + if err != nil { + t.Fatal(err) + } + + entry := []struct { + CreatedTimestamp time.Time `cql:"created_timestamp"` + Message string `cql:"message"` + }{ + { + CreatedTimestamp: time.Now().Truncate(time.Millisecond), + Message: "test time now", + }, + } + + id, _ := RandomUUID() + const typ = 1 + + err = session.Query("INSERT INTO requests_by_id(id, type, log_entries) VALUES (?, ?, ?)", id, typ, entry).Exec() + if err != nil { + t.Fatal(err) + } + + rawResult := map[string]interface{}{} + err = session.Query(`SELECT * FROM requests_by_id WHERE id = ?`, id).MapScan(rawResult) + if err != nil { + t.Fatal(err) + } + + logEntries, ok := rawResult["log_entries"].([]map[string]interface{}) + if !ok { + t.Fatal("log_entries not in scanned map") + } + + if len(logEntries) != 1 { + t.Fatalf("expected to get 1 log_entry got %d", len(logEntries)) + } + + logEntry := logEntries[0] + + timestamp, ok := logEntry["created_timestamp"] + if !ok { + t.Error("created_timestamp not unmarshalled into map") + } else { + if ts, ok := timestamp.(time.Time); ok { + if !ts.In(time.UTC).Equal(entry[0].CreatedTimestamp.In(time.UTC)) { + t.Errorf("created_timestamp not equal to stored: got %v 
expected %v", ts.In(time.UTC), entry[0].CreatedTimestamp.In(time.UTC)) + } + } else { + t.Errorf("created_timestamp was not time.Time got: %T", timestamp) + } + } + + message, ok := logEntry["message"] + if !ok { + t.Error("message not unmarshalled into map") + } else { + if ts, ok := message.(string); ok { + if ts != message { + t.Errorf("message not equal to stored: got %v expected %v", ts, entry[0].Message) + } + } else { + t.Errorf("message was not string got: %T", message) + } + } +} + +func TestUDT_MissingField(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDT are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.missing_field( + name text, + owner text);`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.missing_field( + id uuid, + udt_col frozen, + + primary key(id) + );`) + if err != nil { + t.Fatal(err) + } + + type col struct { + Name string `cql:"name"` + } + + writeCol := &col{ + Name: "test", + } + + id := TimeUUID() + err = session.Query("INSERT INTO missing_field(id, udt_col) VALUES(?, ?)", id, writeCol).Exec() + if err != nil { + t.Fatal(err) + } + + readCol := &col{} + err = session.Query("SELECT udt_col FROM missing_field WHERE id = ?", id).Scan(readCol) + if err != nil { + t.Fatal(err) + } + + if readCol.Name != writeCol.Name { + t.Errorf("expected %q: got %q", writeCol.Name, readCol.Name) + } +} + +func TestUDT_EmptyCollections(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDT are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.nil_collections( + a list, + b map, + c set + );`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.nil_collections( + id uuid, + udt_col frozen, + + primary key(id) + );`) + if err != nil { + t.Fatal(err) + } + + type udt struct { + A []string `cql:"a"` + B map[string]string `cql:"b"` + C []string `cql:"c"` + } + + id := TimeUUID() + err = session.Query("INSERT INTO nil_collections(id, udt_col) VALUES(?, ?)", id, &udt{}).Exec() + if err != nil { + t.Fatal(err) + } + + var val udt + err = session.Query("SELECT udt_col FROM nil_collections WHERE id=?", id).Scan(&val) + if err != nil { + t.Fatal(err) + } + + if val.A != nil { + t.Errorf("expected to get nil got %#+v", val.A) + } + if val.B != nil { + t.Errorf("expected to get nil got %#+v", val.B) + } + if val.C != nil { + t.Errorf("expected to get nil got %#+v", val.C) + } +} + +func TestUDT_UpdateField(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDT are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.update_field_udt( + name text, + owner text);`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.update_field( + id uuid, + udt_col frozen, + + primary key(id) + );`) + if err != nil { + t.Fatal(err) + } + + type col struct { + Name string `cql:"name"` + Owner string `cql:"owner"` + Data string `cql:"data"` + } + + writeCol := &col{ + Name: "test-name", + Owner: "test-owner", + } + + id := TimeUUID() + err = session.Query("INSERT INTO update_field(id, udt_col) VALUES(?, ?)", id, writeCol).Exec() + if err != nil { + t.Fatal(err) + } + + if err := createTable(session, `ALTER TYPE 
gocql_test.update_field_udt ADD data text;`); err != nil { + t.Fatal(err) + } + + readCol := &col{} + err = session.Query("SELECT udt_col FROM update_field WHERE id = ?", id).Scan(readCol) + if err != nil { + t.Fatal(err) + } + + if *readCol != *writeCol { + t.Errorf("expected %+v: got %+v", *writeCol, *readCol) + } +} + +func TestUDT_ScanNullUDT(t *testing.T) { + session := createSession(t) + defer session.Close() + + if session.cfg.ProtoVersion < protoVersion3 { + t.Skip("UDT are only available on protocol >= 3") + } + + err := createTable(session, `CREATE TYPE gocql_test.scan_null_udt_position( + lat int, + lon int, + padding text);`) + if err != nil { + t.Fatal(err) + } + + err = createTable(session, `CREATE TABLE gocql_test.scan_null_udt_houses( + id int, + name text, + loc frozen, + primary key(id) + );`) + if err != nil { + t.Fatal(err) + } + + err = session.Query("INSERT INTO scan_null_udt_houses(id, name) VALUES(?, ?)", 1, "test").Exec() + if err != nil { + t.Fatal(err) + } + + pos := &position{} + + err = session.Query("SELECT loc FROM scan_null_udt_houses WHERE id = ?", 1).Scan(pos) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/gocql/gocql/uuid_test.go b/vendor/github.com/gocql/gocql/uuid_test.go new file mode 100644 index 0000000000..96277fb971 --- /dev/null +++ b/vendor/github.com/gocql/gocql/uuid_test.go @@ -0,0 +1,218 @@ +// +build all unit + +package gocql + +import ( + "bytes" + "strings" + "testing" + "time" +) + +func TestUUIDNil(t *testing.T) { + var uuid UUID + want, got := "00000000-0000-0000-0000-000000000000", uuid.String() + if want != got { + t.Fatalf("TestNil: expected %q got %q", want, got) + } +} + +var testsUUID = []struct { + input string + variant int + version int +}{ + {"b4f00409-cef8-4822-802c-deb20704c365", VariantIETF, 4}, + {"B4F00409-CEF8-4822-802C-DEB20704C365", VariantIETF, 4}, //Use capital letters + {"f81d4fae-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1}, + {"00000000-7dec-11d0-a765-00a0c91e6bf6", VariantIETF, 1}, + {"3051a8d7-aea7-1801-e0bf-bc539dd60cf3", VariantFuture, 1}, + {"3051a8d7-aea7-2801-e0bf-bc539dd60cf3", VariantFuture, 2}, + {"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 3}, + {"3051a8d7-aea7-4801-e0bf-bc539dd60cf3", VariantFuture, 4}, + {"3051a8d7-aea7-3801-e0bf-bc539dd60cf3", VariantFuture, 5}, + {"d0e817e1-e4b1-1801-3fe6-b4b60ccecf9d", VariantNCSCompat, 0}, + {"d0e817e1-e4b1-1801-bfe6-b4b60ccecf9d", VariantIETF, 1}, + {"d0e817e1-e4b1-1801-dfe6-b4b60ccecf9d", VariantMicrosoft, 0}, + {"d0e817e1-e4b1-1801-ffe6-b4b60ccecf9d", VariantFuture, 0}, +} + +func TestPredefinedUUID(t *testing.T) { + for i := range testsUUID { + uuid, err := ParseUUID(testsUUID[i].input) + if err != nil { + t.Errorf("ParseUUID #%d: %v", i, err) + continue + } + + if str := uuid.String(); str != strings.ToLower(testsUUID[i].input) { + t.Errorf("String #%d: expected %q got %q", i, testsUUID[i].input, str) + continue + } + + if variant := uuid.Variant(); variant != testsUUID[i].variant { + t.Errorf("Variant #%d: expected %d got %d", i, testsUUID[i].variant, variant) + } + + if testsUUID[i].variant == VariantIETF { + if version := uuid.Version(); version != testsUUID[i].version { + t.Errorf("Version #%d: expected %d got %d", i, testsUUID[i].version, version) + } + } + + json, err := uuid.MarshalJSON() + if err != nil { + t.Errorf("MarshalJSON #%d: %v", i, err) + } + expectedJson := `"` + strings.ToLower(testsUUID[i].input) + `"` + if string(json) != expectedJson { + t.Errorf("MarshalJSON #%d: expected %v got %v", i, expectedJson, 
string(json)) + } + + var unmarshaled UUID + err = unmarshaled.UnmarshalJSON(json) + if err != nil { + t.Errorf("UnmarshalJSON #%d: %v", i, err) + } + if unmarshaled != uuid { + t.Errorf("UnmarshalJSON #%d: expected %v got %v", i, uuid, unmarshaled) + } + } +} + +func TestInvalidUUIDCharacter(t *testing.T) { + _, err := ParseUUID("z4f00409-cef8-4822-802c-deb20704c365") + if err == nil || !strings.Contains(err.Error(), "invalid UUID") { + t.Fatalf("expected invalid UUID error, got '%v' ", err) + } +} + +func TestInvalidUUIDLength(t *testing.T) { + _, err := ParseUUID("4f00") + if err == nil || !strings.Contains(err.Error(), "invalid UUID") { + t.Fatalf("expected invalid UUID error, got '%v' ", err) + } + + _, err = UUIDFromBytes(TimeUUID().Bytes()[:15]) + if err == nil || err.Error() != "UUIDs must be exactly 16 bytes long" { + t.Fatalf("expected error '%v', got '%v'", "UUIDs must be exactly 16 bytes long", err) + } +} + +func TestRandomUUID(t *testing.T) { + for i := 0; i < 20; i++ { + uuid, err := RandomUUID() + if err != nil { + t.Errorf("RandomUUID: %v", err) + } + if variant := uuid.Variant(); variant != VariantIETF { + t.Errorf("wrong variant. expected %d got %d", VariantIETF, variant) + } + if version := uuid.Version(); version != 4 { + t.Errorf("wrong version. expected %d got %d", 4, version) + } + } +} + +func TestRandomUUIDInvalidAPICalls(t *testing.T) { + uuid, err := RandomUUID() + if err != nil { + t.Fatalf("unexpected error %v", err) + } + + if node := uuid.Node(); node != nil { + t.Fatalf("expected nil, got %v", node) + } + + if stamp := uuid.Timestamp(); stamp != 0 { + t.Fatalf("expceted 0, got %v", stamp) + } + zeroT := time.Time{} + if to := uuid.Time(); to != zeroT { + t.Fatalf("expected %v, got %v", zeroT, to) + } +} + +func TestUUIDFromTime(t *testing.T) { + date := time.Date(1982, 5, 5, 12, 34, 56, 400, time.UTC) + uuid := UUIDFromTime(date) + + if uuid.Time() != date { + t.Errorf("embedded time incorrect. Expected %v got %v", date, uuid.Time()) + } +} + +func TestParseUUID(t *testing.T) { + uuid, _ := ParseUUID("486f3a88-775b-11e3-ae07-d231feb1dc81") + if uuid.Time() != time.Date(2014, 1, 7, 5, 19, 29, 222516000, time.UTC) { + t.Errorf("Expected date of 1/7/2014 at 5:19:29.222516, got %v", uuid.Time()) + } +} + +func TestTimeUUID(t *testing.T) { + var node []byte + timestamp := int64(0) + for i := 0; i < 20; i++ { + uuid := TimeUUID() + + if variant := uuid.Variant(); variant != VariantIETF { + t.Errorf("wrong variant. expected %d got %d", VariantIETF, variant) + } + if version := uuid.Version(); version != 1 { + t.Errorf("wrong version. expected %d got %d", 1, version) + } + + if n := uuid.Node(); !bytes.Equal(n, node) && i > 0 { + t.Errorf("wrong node. 
expected %x, got %x", node, n) + } else if i == 0 { + node = n + } + + ts := uuid.Timestamp() + if ts < timestamp { + t.Errorf("timestamps must grow: timestamp=%v ts=%v", timestamp, ts) + } + timestamp = ts + } +} + +func TestUnmarshalJSON(t *testing.T) { + var withHyphens, withoutHypens, tooLong UUID + + withHyphens.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81"`)) + if withHyphens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) { + t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withHyphens.Time()) + } + + withoutHypens.UnmarshalJSON([]byte(`"486f3a88775b11e3ae07d231feb1dc81"`)) + if withoutHypens.Time().Truncate(time.Second) != time.Date(2014, 1, 7, 5, 19, 29, 0, time.UTC) { + t.Errorf("Expected date of 1/7/2014 at 5:19:29, got %v", withoutHypens.Time()) + } + + err := tooLong.UnmarshalJSON([]byte(`"486f3a88-775b-11e3-ae07-d231feb1dc81486f3a88"`)) + if err == nil { + t.Errorf("no error for invalid JSON UUID") + } + +} + +func TestMarshalText(t *testing.T) { + u, err := ParseUUID("486f3a88-775b-11e3-ae07-d231feb1dc81") + if err != nil { + t.Fatal(err) + } + + text, err := u.MarshalText() + if err != nil { + t.Fatal(err) + } + + var u2 UUID + if err := u2.UnmarshalText(text); err != nil { + t.Fatal(err) + } + + if u != u2 { + t.Fatalf("uuids not equal after marshalling: before=%s after=%s", u, u2) + } +} diff --git a/vendor/github.com/gocql/gocql/wiki_test.go b/vendor/github.com/gocql/gocql/wiki_test.go new file mode 100644 index 0000000000..04a965aed9 --- /dev/null +++ b/vendor/github.com/gocql/gocql/wiki_test.go @@ -0,0 +1,279 @@ +// +build all integration + +package gocql + +import ( + "fmt" + "reflect" + "sort" + "testing" + "time" + + "gopkg.in/inf.v0" +) + +type WikiPage struct { + Title string + RevId UUID + Body string + Views int64 + Protected bool + Modified time.Time + Rating *inf.Dec + Tags []string + Attachments map[string]WikiAttachment +} + +type WikiAttachment []byte + +var wikiTestData = []*WikiPage{ + { + Title: "Frontpage", + RevId: TimeUUID(), + Body: "Welcome to this wiki page!", + Rating: inf.NewDec(131, 3), + Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC), + Tags: []string{"start", "important", "test"}, + Attachments: map[string]WikiAttachment{ + "logo": WikiAttachment("\x00company logo\x00"), + "favicon": WikiAttachment("favicon.ico"), + }, + }, + { + Title: "Foobar", + RevId: TimeUUID(), + Body: "foo::Foo f = new foo::Foo(foo::Foo::INIT);", + Modified: time.Date(2013, time.August, 13, 9, 52, 3, 0, time.UTC), + }, +} + +type WikiTest struct { + session *Session + tb testing.TB + + table string +} + +func CreateSchema(session *Session, tb testing.TB, table string) *WikiTest { + table = "wiki_" + table + if err := createTable(session, fmt.Sprintf("DROP TABLE IF EXISTS gocql_test.%s", table)); err != nil { + tb.Fatal("CreateSchema:", err) + } + + err := createTable(session, fmt.Sprintf(`CREATE TABLE gocql_test.%s ( + title varchar, + revid timeuuid, + body varchar, + views bigint, + protected boolean, + modified timestamp, + rating decimal, + tags set, + attachments map, + PRIMARY KEY (title, revid) + )`, table)) + + if err != nil { + tb.Fatal("CreateSchema:", err) + } + + return &WikiTest{ + session: session, + tb: tb, + table: table, + } +} + +func (w *WikiTest) CreatePages(n int) { + var page WikiPage + t0 := time.Now() + for i := 0; i < n; i++ { + page.Title = fmt.Sprintf("generated_%d", (i&16)+1) + page.Modified = t0.Add(time.Duration(i-n) * time.Minute) + page.RevId = 
UUIDFromTime(page.Modified) + page.Body = fmt.Sprintf("text %d", i) + if err := w.InsertPage(&page); err != nil { + w.tb.Error("CreatePages:", err) + } + } +} + +func (w *WikiTest) InsertPage(page *WikiPage) error { + return w.session.Query(fmt.Sprintf(`INSERT INTO %s + (title, revid, body, views, protected, modified, rating, tags, attachments) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`, w.table), + page.Title, page.RevId, page.Body, page.Views, page.Protected, + page.Modified, page.Rating, page.Tags, page.Attachments).Exec() +} + +func (w *WikiTest) SelectPage(page *WikiPage, title string, revid UUID) error { + return w.session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected, + modified,tags, attachments, rating + FROM %s WHERE title = ? AND revid = ? LIMIT 1`, w.table), + title, revid).Scan(&page.Title, &page.RevId, + &page.Body, &page.Views, &page.Protected, &page.Modified, &page.Tags, + &page.Attachments, &page.Rating) +} + +func (w *WikiTest) GetPageCount() int { + var count int + if err := w.session.Query(fmt.Sprintf(`SELECT COUNT(*) FROM %s`, w.table)).Scan(&count); err != nil { + w.tb.Error("GetPageCount", err) + } + return count +} + +func TestWikiCreateSchema(t *testing.T) { + session := createSession(t) + defer session.Close() + + CreateSchema(session, t, "create") +} + +func BenchmarkWikiCreateSchema(b *testing.B) { + b.StopTimer() + session := createSession(b) + defer func() { + b.StopTimer() + session.Close() + }() + + b.StartTimer() + for i := 0; i < b.N; i++ { + CreateSchema(session, b, "bench_create") + } +} + +func TestWikiCreatePages(t *testing.T) { + session := createSession(t) + defer session.Close() + + w := CreateSchema(session, t, "create_pages") + + numPages := 5 + w.CreatePages(numPages) + if count := w.GetPageCount(); count != numPages { + t.Errorf("expected %d pages, got %d pages.", numPages, count) + } +} + +func BenchmarkWikiCreatePages(b *testing.B) { + b.StopTimer() + session := createSession(b) + defer func() { + b.StopTimer() + session.Close() + }() + + w := CreateSchema(session, b, "bench_create_pages") + + b.StartTimer() + + w.CreatePages(b.N) +} + +func BenchmarkWikiSelectAllPages(b *testing.B) { + b.StopTimer() + session := createSession(b) + defer func() { + b.StopTimer() + session.Close() + }() + w := CreateSchema(session, b, "bench_select_all") + + w.CreatePages(100) + b.StartTimer() + + var page WikiPage + for i := 0; i < b.N; i++ { + iter := session.Query(fmt.Sprintf(`SELECT title, revid, body, views, protected, + modified, tags, attachments, rating + FROM %s`, w.table)).Iter() + for iter.Scan(&page.Title, &page.RevId, &page.Body, &page.Views, + &page.Protected, &page.Modified, &page.Tags, &page.Attachments, + &page.Rating) { + // pass + } + if err := iter.Close(); err != nil { + b.Error(err) + } + } +} + +func BenchmarkWikiSelectSinglePage(b *testing.B) { + b.StopTimer() + session := createSession(b) + defer func() { + b.StopTimer() + session.Close() + }() + w := CreateSchema(session, b, "bench_select_single") + pages := make([]WikiPage, 100) + w.CreatePages(len(pages)) + iter := session.Query(fmt.Sprintf(`SELECT title, revid FROM %s`, w.table)).Iter() + for i := 0; i < len(pages); i++ { + if !iter.Scan(&pages[i].Title, &pages[i].RevId) { + pages = pages[:i] + break + } + } + if err := iter.Close(); err != nil { + b.Error(err) + } + b.StartTimer() + + var page WikiPage + for i := 0; i < b.N; i++ { + p := &pages[i%len(pages)] + if err := w.SelectPage(&page, p.Title, p.RevId); err != nil { + b.Error(err) + } + } +} + +func 
BenchmarkWikiSelectPageCount(b *testing.B) { + b.StopTimer() + session := createSession(b) + defer func() { + b.StopTimer() + session.Close() + }() + + w := CreateSchema(session, b, "bench_page_count") + const numPages = 10 + w.CreatePages(numPages) + b.StartTimer() + for i := 0; i < b.N; i++ { + if count := w.GetPageCount(); count != numPages { + b.Errorf("expected %d pages, got %d pages.", numPages, count) + } + } +} + +func TestWikiTypicalCRUD(t *testing.T) { + session := createSession(t) + defer session.Close() + + w := CreateSchema(session, t, "crud") + + for _, page := range wikiTestData { + if err := w.InsertPage(page); err != nil { + t.Error("InsertPage:", err) + } + } + if count := w.GetPageCount(); count != len(wikiTestData) { + t.Errorf("count: expected %d, got %d\n", len(wikiTestData), count) + } + for _, original := range wikiTestData { + page := new(WikiPage) + if err := w.SelectPage(page, original.Title, original.RevId); err != nil { + t.Error("SelectPage:", err) + continue + } + sort.Sort(sort.StringSlice(page.Tags)) + sort.Sort(sort.StringSlice(original.Tags)) + if !reflect.DeepEqual(page, original) { + t.Errorf("page: expected %#v, got %#v\n", original, page) + } + } +} diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 0000000000..042091d9b3 --- /dev/null +++ b/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS index 824bf2e148..bcfa19520a 100644 --- a/vendor/github.com/golang/snappy/AUTHORS +++ b/vendor/github.com/golang/snappy/AUTHORS @@ -11,4 +11,5 @@ Damian Gryski Google Inc. Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho Sebastien Binet diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS index 9f54f21ff7..931ae31606 100644 --- a/vendor/github.com/golang/snappy/CONTRIBUTORS +++ b/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -32,5 +32,6 @@ Kai Backman Marc-Antoine Ruel Nigel Tao Rob Pike +Rodolfo Carvalho Russ Cox Sebastien Binet diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README index 5074bbab8d..cea12879a0 100644 --- a/vendor/github.com/golang/snappy/README +++ b/vendor/github.com/golang/snappy/README @@ -5,3 +5,103 @@ $ go get github.com/golang/snappy Unless otherwise noted, the Snappy-Go source files are distributed under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go index e7f1259a34..72efb0353d 100644 --- a/vendor/github.com/golang/snappy/decode.go +++ b/vendor/github.com/golang/snappy/decode.go @@ -17,6 +17,8 @@ var ( ErrTooLarge = errors.New("snappy: decoded block is too large") // ErrUnsupported reports that the input isn't supported. ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") ) // DecodedLen returns the length of the decoded block. @@ -40,96 +42,33 @@ func decodedLen(src []byte) (blockLen, headerLen int, err error) { return int(v), n, nil } +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + // Decode returns the decoded form of src. The returned slice may be a sub- // slice of dst if dst was large enough to hold the entire decoded block. // Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. +// +// The dst and src must not overlap. It is valid to pass a nil dst. 
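+//
+// A minimal round-trip sketch (illustrative only; not part of the upstream
+// file):
+//
+//	compressed := Encode(nil, []byte("hello, snappy"))
+//	decoded, err := Decode(nil, compressed)
+//	// err == nil && string(decoded) == "hello, snappy"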
func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { return nil, err } - if len(dst) < dLen { + if dLen <= len(dst) { + dst = dst[:dLen] + } else { dst = make([]byte, dLen) } - - var d, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-1]) - case x == 61: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-2]) | uint(src[s-1])<<8 - case x == 62: - s += 4 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 - case x == 63: - s += 5 - if s > len(src) { - return nil, ErrCorrupt - } - x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 - } - length = int(x + 1) - if length <= 0 { - return nil, errors.New("snappy: unsupported literal length") - } - if length > len(dst)-d || length > len(src)-s { - return nil, ErrCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if s > len(src) { - return nil, ErrCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) - - case tagCopy2: - s += 3 - if s > len(src) { - return nil, ErrCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(src[s-2]) | int(src[s-1])<<8 - - case tagCopy4: - return nil, errors.New("snappy: unsupported COPY_4 tag") - } - - end := d + length - if offset > d || end > len(dst) { - return nil, ErrCorrupt - } - for ; d < end; d++ { - dst[d] = dst[d-offset] - } - } - if d != dLen { - return nil, ErrCorrupt + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength } - return dst[:d], nil + return nil, ErrCorrupt } // NewReader returns a new Reader that decompresses from r, using the framing @@ -138,12 +77,12 @@ func Decode(dst, src []byte) ([]byte, error) { func NewReader(r io.Reader) *Reader { return &Reader{ r: r, - decoded: make([]byte, maxUncompressedChunkLen), - buf: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)+checksumSize), + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), } } -// Reader is an io.Reader than can read Snappy-compressed bytes. +// Reader is an io.Reader that can read Snappy-compressed bytes. 
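+//
+// A typical streaming use is to wrap a framed source and read through it
+// (a sketch; r0 stands for any io.Reader carrying framed snappy data):
+//
+//	sr := NewReader(r0)
+//	if _, err := io.Copy(ioutil.Discard, sr); err != nil {
+//		// handle decompression or I/O error
+//	}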
type Reader struct { r io.Reader err error @@ -165,9 +104,9 @@ func (r *Reader) Reset(reader io.Reader) { r.readHeader = false } -func (r *Reader) readFull(p []byte) (ok bool) { +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { r.err = ErrCorrupt } return false @@ -186,7 +125,7 @@ func (r *Reader) Read(p []byte) (int, error) { r.i += n return n, nil } - if !r.readFull(r.buf[:4]) { + if !r.readFull(r.buf[:4], true) { return 0, r.err } chunkType := r.buf[0] @@ -213,7 +152,7 @@ func (r *Reader) Read(p []byte) (int, error) { return 0, r.err } buf := r.buf[:chunkLen] - if !r.readFull(buf) { + if !r.readFull(buf, false) { return 0, r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 @@ -246,13 +185,17 @@ func (r *Reader) Read(p []byte) (int, error) { return 0, r.err } buf := r.buf[:checksumSize] - if !r.readFull(buf) { + if !r.readFull(buf, false) { return 0, r.err } checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 // Read directly into r.decoded instead of via r.buf. n := chunkLen - checksumSize - if !r.readFull(r.decoded[:n]) { + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { return 0, r.err } if crc(r.decoded[:n]) != checksum { @@ -268,7 +211,7 @@ func (r *Reader) Read(p []byte) (int, error) { r.err = ErrCorrupt return 0, r.err } - if !r.readFull(r.buf[:len(magicBody)]) { + if !r.readFull(r.buf[:len(magicBody)], false) { return 0, r.err } for i := 0; i < len(magicBody); i++ { @@ -287,7 +230,7 @@ func (r *Reader) Read(p []byte) (int, error) { } // Section 4.4 Padding (chunk type 0xfe). // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen]) { + if !r.readFull(r.buf[:chunkLen], false) { return 0, r.err } } diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000000..fcd192b849 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000000..e6179f65e3 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
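+	//
+	// In Go terms, the fast path below is roughly (a sketch mirroring the
+	// checks above, not a quote from decode_other.go):
+	//
+	//	if length <= 16 && len(dst)-d >= 16 && len(src)-s >= 16 {
+	//		copy(dst[d:d+16], src[s:s+16]) // always 16 bytes; overrun fixed up later
+	//		d, s = d+length, s+length
+	//		continue
+	//	}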
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
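+	//
+	// For instance (an illustrative byte pair), tagCopy1 input {0x6d, 0x2a}
+	// decodes to length = 4 + (0x6d>>2)&7 = 7 and
+	// offset = (0x6d&0xe0)<<3 | 0x2a = 0x32a.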
+ // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. 
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000000..8c9f2049bc --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go index f3b5484bc7..8d393e904b 100644 --- a/vendor/github.com/golang/snappy/encode.go +++ b/vendor/github.com/golang/snappy/encode.go @@ -6,151 +6,78 @@ package snappy import ( "encoding/binary" + "errors" "io" ) -// We limit how far copy back-references can go, the same as the C++ code. -const maxOffset = 1 << 15 - -// emitLiteral writes a literal chunk and returns the number of bytes written. -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - case n < 1<<16: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - case n < 1<<24: - dst[0] = 62<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - i = 4 - case int64(n) < 1<<32: - dst[0] = 63<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - dst[3] = uint8(n >> 16) - dst[4] = uint8(n >> 24) - i = 5 - default: - panic("snappy: source buffer is too long") - } - if copy(dst[i:], lit) != len(lit) { - panic("snappy: destination buffer is too short") - } - return i + len(lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -func emitCopy(dst []byte, offset, length int) int { - i := 0 - for length > 0 { - x := length - 4 - if 0 <= x && x < 1<<3 && offset < 1<<11 { - dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - i += 2 - break - } - - x = length - if x > 1<<6 { - x = 1 << 6 - } - dst[i+0] = uint8(x-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= x - } - return i -} - // Encode returns the encoded form of src. 
The returned slice may be a sub- // slice of dst if dst was large enough to hold the entire encoded block. // Otherwise, a newly allocated slice will be returned. -// It is valid to pass a nil dst. +// +// The dst and src must not overlap. It is valid to pass a nil dst. func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); len(dst) < n { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { dst = make([]byte, n) } // The block starts with the varint-encoded length of the decompressed bytes. d := binary.PutUvarint(dst, uint64(len(src))) - // Return early if src is short. - if len(src) <= 4 { - if len(src) != 0 { - d += emitLiteral(dst[d:], src) - } - return dst[:d] - } - - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - const maxTableSize = 1 << 14 - shift, tableSize := uint(32-8), 1<<8 - for tableSize < maxTableSize && tableSize < len(src) { - shift-- - tableSize *= 2 - } - var table [maxTableSize]int - - // Iterate over the source bytes. - var ( - s int // The iterator position. - t int // The last position with the same hash as s. - lit int // The start position of any pending literal bytes. - ) - for s+3 < len(src) { - // Update the hash table. - b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] - h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 - p := &table[(h*0x1e35a7bd)>>shift] - // We need to to store values in [-1, inf) in table. To save - // some initialization time, (re)use the table's zero value - // and shift the values against this zero: add 1 on writes, - // subtract 1 on reads. - t, *p = *p-1, s+1 - // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. - if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { - s++ - continue - } - // Otherwise, we have a match. First, emit any pending literal bytes. - if lit != s { - d += emitLiteral(dst[d:], src[lit:s]) + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] } - // Extend the match to be as long as possible. - s0 := s - s, t = s+4, t+4 - for s < len(src) && src[s] == src[t] { - s++ - t++ + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) } - // Emit the copied bytes. - d += emitCopy(dst[d:], s-t, s-s0) - lit = s - } - - // Emit any final pending literal bytes and return. - if lit != len(src) { - d += emitLiteral(dst[d:], src[lit:]) } return dst[:d] } +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. 
A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + // MaxEncodedLen returns the maximum length of a snappy block, given its // uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } // Compressed data can be defined as: // compressed := item* literal* // item := literal* copy @@ -171,26 +98,63 @@ func MaxEncodedLen(srcLen int) int { // That is, 6 bytes of input turn into 7 bytes of "compressed" data. // // This last factor dominates the blowup, so the final estimate is: - return 32 + srcLen + srcLen/6 + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) } -// NewWriter returns a new Writer that compresses to w, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. func NewWriter(w io.Writer) *Writer { return &Writer{ - w: w, - enc: make([]byte, MaxEncodedLen(maxUncompressedChunkLen)), + w: w, + obuf: make([]byte, obufLen), } } -// Writer is an io.Writer than can write Snappy-compressed bytes. +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. type Writer struct { - w io.Writer - err error - enc []byte - buf [checksumSize + chunkHeaderSize]byte - wroteHeader bool + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. 
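+	// (The stream header is the magic chunk; w.write emits it lazily, as a
+	// prefix of the first chunk written to the underlying io.Writer.)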
+ wroteStreamHeader bool } // Reset discards the writer's state and switches the Snappy writer to write to @@ -198,26 +162,63 @@ type Writer struct { func (w *Writer) Reset(writer io.Writer) { w.w = writer w.err = nil - w.wroteHeader = false + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false } // Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (n int, errRet error) { - if w.err != nil { - return 0, w.err +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) } - if !w.wroteHeader { - copy(w.enc, magicChunk) - if _, err := w.w.Write(w.enc[:len(magicChunk)]); err != nil { - w.err = err - return n, err + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() } - w.wroteHeader = true + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err } for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + var uncompressed []byte - if len(p) > maxUncompressedChunkLen { - uncompressed, p = p[:maxUncompressedChunkLen], p[maxUncompressedChunkLen:] + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] } else { uncompressed, p = p, nil } @@ -225,30 +226,60 @@ func (w *Writer) Write(p []byte) (n int, errRet error) { // Compress the buffer, discarding the result if the improvement // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) chunkType := uint8(chunkTypeCompressedData) - chunkBody := Encode(w.enc, uncompressed) - if len(chunkBody) >= len(uncompressed)-len(uncompressed)/8 { - chunkType, chunkBody = chunkTypeUncompressedData, uncompressed + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen } - chunkLen := 4 + len(chunkBody) - w.buf[0] = chunkType - w.buf[1] = uint8(chunkLen >> 0) - w.buf[2] = uint8(chunkLen >> 8) - w.buf[3] = uint8(chunkLen >> 16) - w.buf[4] = uint8(checksum >> 0) - w.buf[5] = uint8(checksum >> 8) - w.buf[6] = uint8(checksum >> 16) - w.buf[7] = uint8(checksum >> 24) - if _, err := w.w.Write(w.buf[:]); err != nil { + // Fill in the per-chunk header that comes before the body. 
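+		// (Layout sketch, matching the stores below: obuf byte 0 holds the
+		// chunk type, bytes 1-3 hold chunkLen as a little-endian uint24, and
+		// bytes 4-7 hold the little-endian uint32 checksum of the uncompressed
+		// data, all offset by len(magicChunk) within obuf.)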
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { w.err = err - return n, err + return nRet, err } - if _, err := w.w.Write(chunkBody); err != nil { - w.err = err - return n, err + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } } - n += len(uncompressed) + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed } - return n, nil + return ret } diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000000..150d91bc8b --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000000..adfd979fe2 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
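+//
+// For example, the table lookup
+//	MOVWQZX table-32768(SP)(R11*2), R15
+// appears below as the byte sequence
+//	BYTE $0x4e; BYTE $0x0f; BYTE $0xb7; BYTE $0x7c; BYTE $0x5c; BYTE $0x78
+// with the objdump rendering "movzwq 0x78(%rsp,%r11,2),%r15" kept alongside
+// in an XXX comment.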
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
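+//
+// For example (an illustrative note, not in the upstream source): with
+// src = "abcdabcdX", i = 0 and j = 4, the bytes match pairwise through
+// src[3] == src[7], so extendMatch returns 8.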
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
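+	// (The spill slots used below are the offsets listed in the register
+	// table above: CX at 56(SP), DX at 64(SP) and R9 at 88(SP).)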
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
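+	//
+	// (Sketch of the "etc." above, following encode_other.go: the single
+	// load64 of x at s-1 yields uint32(x), uint32(x>>8) and uint32(x>>16),
+	// i.e. the load32 values at s-1, s and s+1, from which prevHash, currHash
+	// and, if needed, nextHash are computed below.)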
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000000..dbcae905e6 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
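+// For example (an illustrative note, not in the upstream source): a 10-byte
+// literal is encoded as the single tag byte uint8(10-1)<<2 | tagLiteral
+// followed by the 10 bytes themselves, so emitLiteral returns 11.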
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
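+
+// An illustrative worked example (not part of the upstream source): encoding
+// a length 67 copy with offset 10 skips the length >= 68 loop, emits a
+// length 60 tagCopy2 (59<<2|tagCopy2, 10, 0), and then, since the remaining
+// length 7 is < 12 and the offset is < 2048, a 2-byte tagCopy1
+// (0<<5 | (7-4)<<2 | tagCopy1, 10): 3+2 = 5 bytes in all, as the comment in
+// emitCopy above notes.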
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (i.e. right-shifting by five)
+		// gives the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/golden_test.go b/vendor/github.com/golang/snappy/golden_test.go new file mode 100644 index 0000000000..e4496f92e3 --- /dev/null +++ b/vendor/github.com/golang/snappy/golden_test.go @@ -0,0 +1,1965 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +// extendMatchGoldenTestCases is the i and j arguments, and the returned value, +// for every extendMatch call issued when encoding the +// testdata/Mark.Twain-Tom.Sawyer.txt file. It is used to benchmark the +// extendMatch implementation. +// +// It was generated manually by adding some print statements to the (pure Go) +// extendMatch implementation: +// +// func extendMatch(src []byte, i, j int) int { +// i0, j0 := i, j +// for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { +// } +// println("{", i0, ",", j0, ",", j, "},") +// return j +// } +// +// and running "go test -test.run=EncodeGoldenInput -tags=noasm". +var extendMatchGoldenTestCases = []struct { + i, j, want int +}{ + {11, 61, 62}, + {80, 81, 82}, + {86, 87, 101}, + {85, 133, 149}, + {152, 153, 162}, + {133, 168, 193}, + {168, 207, 225}, + {81, 255, 275}, + {278, 279, 283}, + {306, 417, 417}, + {373, 428, 430}, + {389, 444, 447}, + {474, 510, 512}, + {465, 533, 533}, + {47, 547, 547}, + {307, 551, 554}, + {420, 582, 587}, + {309, 604, 604}, + {604, 625, 625}, + {538, 629, 629}, + {328, 640, 640}, + {573, 645, 645}, + {319, 657, 657}, + {30, 664, 664}, + {45, 679, 680}, + {621, 684, 684}, + {376, 700, 700}, + {33, 707, 708}, + {601, 733, 733}, + {334, 744, 745}, + {625, 758, 759}, + {382, 763, 763}, + {550, 769, 771}, + {533, 789, 789}, + {804, 813, 813}, + {342, 841, 842}, + {742, 847, 847}, + {74, 852, 852}, + {810, 864, 864}, + {758, 868, 869}, + {714, 883, 883}, + {582, 889, 891}, + {61, 934, 935}, + {894, 942, 942}, + {939, 949, 949}, + {785, 956, 957}, + {886, 978, 978}, + {792, 998, 998}, + {998, 1005, 1005}, + {572, 1032, 1032}, + {698, 1051, 1053}, + {599, 1067, 1069}, + {1056, 1079, 1079}, + {942, 1089, 1090}, + {831, 1094, 1096}, + {1088, 1100, 1103}, + {732, 1113, 1114}, + {1037, 1118, 1118}, + {872, 1128, 1130}, + {1079, 1140, 1142}, + {332, 1162, 1162}, + {207, 1168, 1186}, + {1189, 1190, 1225}, + {105, 1229, 1230}, + {79, 1256, 1257}, + {1190, 1261, 1283}, + {255, 1306, 1306}, + {1319, 1339, 1358}, + {364, 1370, 1370}, + {955, 1378, 1380}, + {122, 1403, 1403}, + {1325, 1407, 1419}, + {664, 1423, 1424}, + {941, 1461, 1463}, + {867, 1477, 1478}, + {757, 1488, 1489}, + {1140, 1499, 1499}, + {31, 1506, 1506}, + {1487, 1510, 1512}, + {1089, 1520, 1521}, + {1467, 1525, 1529}, + {1394, 1537, 1537}, + {1499, 1541, 1541}, + {367, 1558, 1558}, + {1475, 1564, 1564}, + {1525, 1568, 1571}, + {1541, 1582, 1583}, + {864, 1587, 1588}, + {704, 1597, 1597}, + {336, 1602, 1602}, + {1383, 1613, 1613}, + {1498, 1617, 1618}, + {1051, 1623, 1625}, + {401, 1643, 1645}, + {1072, 1654, 1655}, + {1067, 1667, 1669}, + {699, 1673, 1674}, + {1587, 1683, 1684}, + 
{920, 1696, 1696}, + {1505, 1710, 1710}, + {1550, 1723, 1723}, + {996, 1727, 1727}, + {833, 1733, 1734}, + {1638, 1739, 1740}, + {1654, 1744, 1744}, + {753, 1761, 1761}, + {1548, 1773, 1773}, + {1568, 1777, 1780}, + {1683, 1793, 1794}, + {948, 1801, 1801}, + {1666, 1805, 1808}, + {1502, 1814, 1814}, + {1696, 1822, 1822}, + {502, 1836, 1837}, + {917, 1843, 1843}, + {1733, 1854, 1855}, + {970, 1859, 1859}, + {310, 1863, 1863}, + {657, 1872, 1872}, + {1005, 1876, 1876}, + {1662, 1880, 1880}, + {904, 1892, 1892}, + {1427, 1910, 1910}, + {1772, 1929, 1930}, + {1822, 1937, 1940}, + {1858, 1949, 1950}, + {1602, 1956, 1956}, + {1150, 1962, 1962}, + {1504, 1966, 1967}, + {51, 1971, 1971}, + {1605, 1979, 1979}, + {1458, 1983, 1988}, + {1536, 2001, 2006}, + {1373, 2014, 2018}, + {1494, 2025, 2025}, + {1667, 2029, 2031}, + {1592, 2035, 2035}, + {330, 2045, 2045}, + {1376, 2053, 2053}, + {1991, 2058, 2059}, + {1635, 2065, 2065}, + {1992, 2073, 2074}, + {2014, 2080, 2081}, + {1546, 2085, 2087}, + {59, 2099, 2099}, + {1996, 2106, 2106}, + {1836, 2110, 2110}, + {2068, 2114, 2114}, + {1338, 2122, 2122}, + {1562, 2128, 2130}, + {1934, 2134, 2134}, + {2114, 2141, 2142}, + {977, 2149, 2150}, + {956, 2154, 2155}, + {1407, 2162, 2162}, + {1773, 2166, 2166}, + {883, 2171, 2171}, + {623, 2175, 2178}, + {1520, 2191, 2192}, + {1162, 2200, 2200}, + {912, 2204, 2204}, + {733, 2208, 2208}, + {1777, 2212, 2215}, + {1532, 2219, 2219}, + {718, 2223, 2225}, + {2069, 2229, 2229}, + {2207, 2245, 2246}, + {1139, 2264, 2264}, + {677, 2274, 2274}, + {2099, 2279, 2279}, + {1863, 2283, 2283}, + {1966, 2305, 2306}, + {2279, 2313, 2313}, + {1628, 2319, 2319}, + {755, 2329, 2329}, + {1461, 2334, 2334}, + {2117, 2340, 2340}, + {2313, 2349, 2349}, + {1859, 2353, 2353}, + {1048, 2362, 2362}, + {895, 2366, 2366}, + {2278, 2373, 2373}, + {1884, 2377, 2377}, + {1402, 2387, 2392}, + {700, 2398, 2398}, + {1971, 2402, 2402}, + {2009, 2419, 2419}, + {1441, 2426, 2428}, + {2208, 2432, 2432}, + {2038, 2436, 2436}, + {932, 2443, 2443}, + {1759, 2447, 2448}, + {744, 2452, 2452}, + {1875, 2458, 2458}, + {2405, 2468, 2468}, + {1596, 2472, 2473}, + {1953, 2480, 2482}, + {736, 2487, 2487}, + {1913, 2493, 2493}, + {774, 2497, 2497}, + {1484, 2506, 2508}, + {2432, 2512, 2512}, + {752, 2519, 2519}, + {2497, 2523, 2523}, + {2409, 2528, 2529}, + {2122, 2533, 2533}, + {2396, 2537, 2538}, + {2410, 2547, 2548}, + {1093, 2555, 2560}, + {551, 2564, 2565}, + {2268, 2569, 2569}, + {1362, 2580, 2580}, + {1916, 2584, 2585}, + {994, 2589, 2590}, + {1979, 2596, 2596}, + {1041, 2602, 2602}, + {2104, 2614, 2616}, + {2609, 2621, 2628}, + {2329, 2638, 2638}, + {2211, 2657, 2658}, + {2638, 2662, 2667}, + {2578, 2676, 2679}, + {2153, 2685, 2686}, + {2608, 2696, 2697}, + {598, 2712, 2712}, + {2620, 2719, 2720}, + {1888, 2724, 2728}, + {2709, 2732, 2732}, + {1365, 2739, 2739}, + {784, 2747, 2748}, + {424, 2753, 2753}, + {2204, 2759, 2759}, + {812, 2768, 2769}, + {2455, 2773, 2773}, + {1722, 2781, 2781}, + {1917, 2792, 2792}, + {2705, 2799, 2799}, + {2685, 2806, 2807}, + {2742, 2811, 2811}, + {1370, 2818, 2818}, + {2641, 2830, 2830}, + {2512, 2837, 2837}, + {2457, 2841, 2841}, + {2756, 2845, 2845}, + {2719, 2855, 2855}, + {1423, 2859, 2859}, + {2849, 2863, 2865}, + {1474, 2871, 2871}, + {1161, 2875, 2876}, + {2282, 2880, 2881}, + {2746, 2888, 2888}, + {1783, 2893, 2893}, + {2401, 2899, 2900}, + {2632, 2920, 2923}, + {2422, 2928, 2930}, + {2715, 2939, 2939}, + {2162, 2943, 2943}, + {2859, 2947, 2947}, + {1910, 2951, 2951}, + {1431, 2955, 2956}, + {1439, 2964, 2964}, + {2501, 
2968, 2969}, + {2029, 2973, 2976}, + {689, 2983, 2984}, + {1658, 2988, 2988}, + {1031, 2996, 2996}, + {2149, 3001, 3002}, + {25, 3009, 3013}, + {2964, 3023, 3023}, + {953, 3027, 3028}, + {2359, 3036, 3036}, + {3023, 3049, 3049}, + {2880, 3055, 3056}, + {2973, 3076, 3077}, + {2874, 3090, 3090}, + {2871, 3094, 3094}, + {2532, 3100, 3100}, + {2938, 3107, 3108}, + {350, 3115, 3115}, + {2196, 3119, 3121}, + {1133, 3127, 3129}, + {1797, 3134, 3150}, + {3032, 3158, 3158}, + {3016, 3172, 3172}, + {2533, 3179, 3179}, + {3055, 3187, 3188}, + {1384, 3192, 3193}, + {2799, 3199, 3199}, + {2126, 3203, 3207}, + {2334, 3215, 3215}, + {2105, 3220, 3221}, + {3199, 3229, 3229}, + {2891, 3233, 3233}, + {855, 3240, 3240}, + {1852, 3253, 3256}, + {2140, 3263, 3263}, + {1682, 3268, 3270}, + {3243, 3274, 3274}, + {924, 3279, 3279}, + {2212, 3283, 3283}, + {2596, 3287, 3287}, + {2999, 3291, 3291}, + {2353, 3295, 3295}, + {2480, 3302, 3304}, + {1959, 3308, 3311}, + {3000, 3318, 3318}, + {845, 3330, 3330}, + {2283, 3334, 3334}, + {2519, 3342, 3342}, + {3325, 3346, 3348}, + {2397, 3353, 3354}, + {2763, 3358, 3358}, + {3198, 3363, 3364}, + {3211, 3368, 3372}, + {2950, 3376, 3377}, + {3245, 3388, 3391}, + {2264, 3398, 3398}, + {795, 3403, 3403}, + {3287, 3407, 3407}, + {3358, 3411, 3411}, + {3317, 3415, 3415}, + {3232, 3431, 3431}, + {2128, 3435, 3437}, + {3236, 3441, 3441}, + {3398, 3445, 3446}, + {2814, 3450, 3450}, + {3394, 3466, 3466}, + {2425, 3470, 3470}, + {3330, 3476, 3476}, + {1612, 3480, 3480}, + {1004, 3485, 3486}, + {2732, 3490, 3490}, + {1117, 3494, 3495}, + {629, 3501, 3501}, + {3087, 3514, 3514}, + {684, 3518, 3518}, + {3489, 3522, 3524}, + {1760, 3529, 3529}, + {617, 3537, 3537}, + {3431, 3541, 3541}, + {997, 3547, 3547}, + {882, 3552, 3553}, + {2419, 3558, 3558}, + {610, 3562, 3563}, + {1903, 3567, 3569}, + {3005, 3575, 3575}, + {3076, 3585, 3586}, + {3541, 3590, 3590}, + {3490, 3594, 3594}, + {1899, 3599, 3599}, + {3545, 3606, 3606}, + {3290, 3614, 3615}, + {2056, 3619, 3620}, + {3556, 3625, 3625}, + {3294, 3632, 3633}, + {637, 3643, 3644}, + {3609, 3648, 3650}, + {3175, 3658, 3658}, + {3498, 3665, 3665}, + {1597, 3669, 3669}, + {1983, 3673, 3673}, + {3215, 3682, 3682}, + {3544, 3689, 3689}, + {3694, 3698, 3698}, + {3228, 3715, 3716}, + {2594, 3720, 3722}, + {3573, 3726, 3726}, + {2479, 3732, 3735}, + {3191, 3741, 3742}, + {1113, 3746, 3747}, + {2844, 3751, 3751}, + {3445, 3756, 3757}, + {3755, 3766, 3766}, + {3421, 3775, 3780}, + {3593, 3784, 3786}, + {3263, 3796, 3796}, + {3469, 3806, 3806}, + {2602, 3815, 3815}, + {723, 3819, 3821}, + {1608, 3826, 3826}, + {3334, 3830, 3830}, + {2198, 3835, 3835}, + {2635, 3840, 3840}, + {3702, 3852, 3853}, + {3406, 3858, 3859}, + {3681, 3867, 3870}, + {3407, 3880, 3880}, + {340, 3889, 3889}, + {3772, 3893, 3893}, + {593, 3897, 3897}, + {2563, 3914, 3916}, + {2981, 3929, 3929}, + {1835, 3933, 3934}, + {3906, 3951, 3951}, + {1459, 3958, 3958}, + {3889, 3974, 3974}, + {2188, 3982, 3982}, + {3220, 3986, 3987}, + {3585, 3991, 3993}, + {3712, 3997, 4001}, + {2805, 4007, 4007}, + {1879, 4012, 4013}, + {3618, 4018, 4018}, + {1145, 4031, 4032}, + {3901, 4037, 4037}, + {2772, 4046, 4047}, + {2802, 4053, 4054}, + {3299, 4058, 4058}, + {3725, 4066, 4066}, + {2271, 4070, 4070}, + {385, 4075, 4076}, + {3624, 4089, 4090}, + {3745, 4096, 4098}, + {1563, 4102, 4102}, + {4045, 4106, 4111}, + {3696, 4115, 4119}, + {3376, 4125, 4126}, + {1880, 4130, 4130}, + {2048, 4140, 4141}, + {2724, 4149, 4149}, + {1767, 4156, 4156}, + {2601, 4164, 4164}, + {2757, 4168, 4168}, + {3974, 4172, 
4172}, + {3914, 4178, 4178}, + {516, 4185, 4185}, + {1032, 4189, 4190}, + {3462, 4197, 4198}, + {3805, 4202, 4203}, + {3910, 4207, 4212}, + {3075, 4221, 4221}, + {3756, 4225, 4226}, + {1872, 4236, 4237}, + {3844, 4241, 4241}, + {3991, 4245, 4249}, + {2203, 4258, 4258}, + {3903, 4267, 4268}, + {705, 4272, 4272}, + {1896, 4276, 4276}, + {1955, 4285, 4288}, + {3746, 4302, 4303}, + {2672, 4311, 4311}, + {3969, 4317, 4317}, + {3883, 4322, 4322}, + {1920, 4339, 4340}, + {3527, 4344, 4346}, + {1160, 4358, 4358}, + {3648, 4364, 4366}, + {2711, 4387, 4387}, + {3619, 4391, 4392}, + {1944, 4396, 4396}, + {4369, 4400, 4400}, + {2736, 4404, 4407}, + {2546, 4411, 4412}, + {4390, 4422, 4422}, + {3610, 4426, 4427}, + {4058, 4431, 4431}, + {4374, 4435, 4435}, + {3463, 4445, 4446}, + {1813, 4452, 4452}, + {3669, 4456, 4456}, + {3830, 4460, 4460}, + {421, 4464, 4465}, + {1719, 4471, 4471}, + {3880, 4475, 4475}, + {1834, 4485, 4487}, + {3590, 4491, 4491}, + {442, 4496, 4497}, + {4435, 4501, 4501}, + {3814, 4509, 4509}, + {987, 4513, 4513}, + {4494, 4518, 4521}, + {3218, 4526, 4529}, + {4221, 4537, 4537}, + {2778, 4543, 4545}, + {4422, 4552, 4552}, + {4031, 4558, 4559}, + {4178, 4563, 4563}, + {3726, 4567, 4574}, + {4027, 4578, 4578}, + {4339, 4585, 4587}, + {3796, 4592, 4595}, + {543, 4600, 4613}, + {2855, 4620, 4621}, + {2795, 4627, 4627}, + {3440, 4631, 4632}, + {4279, 4636, 4639}, + {4245, 4643, 4645}, + {4516, 4649, 4650}, + {3133, 4654, 4654}, + {4042, 4658, 4659}, + {3422, 4663, 4663}, + {4046, 4667, 4668}, + {4267, 4672, 4672}, + {4004, 4676, 4677}, + {2490, 4682, 4682}, + {2451, 4697, 4697}, + {3027, 4705, 4705}, + {4028, 4717, 4717}, + {4460, 4721, 4721}, + {2471, 4725, 4727}, + {3090, 4735, 4735}, + {3192, 4739, 4740}, + {3835, 4760, 4760}, + {4540, 4764, 4764}, + {4007, 4772, 4774}, + {619, 4784, 4784}, + {3561, 4789, 4791}, + {3367, 4805, 4805}, + {4490, 4810, 4811}, + {2402, 4815, 4815}, + {3352, 4819, 4822}, + {2773, 4828, 4828}, + {4552, 4832, 4832}, + {2522, 4840, 4841}, + {316, 4847, 4852}, + {4715, 4858, 4858}, + {2959, 4862, 4862}, + {4858, 4868, 4869}, + {2134, 4873, 4873}, + {578, 4878, 4878}, + {4189, 4889, 4890}, + {2229, 4894, 4894}, + {4501, 4898, 4898}, + {2297, 4903, 4903}, + {2933, 4909, 4909}, + {3008, 4913, 4913}, + {3153, 4917, 4917}, + {4819, 4921, 4921}, + {4921, 4932, 4933}, + {4920, 4944, 4945}, + {4814, 4954, 4955}, + {576, 4966, 4966}, + {1854, 4970, 4971}, + {1374, 4975, 4976}, + {3307, 4980, 4980}, + {974, 4984, 4988}, + {4721, 4992, 4992}, + {4898, 4996, 4996}, + {4475, 5006, 5006}, + {3819, 5012, 5012}, + {1948, 5019, 5021}, + {4954, 5027, 5029}, + {3740, 5038, 5040}, + {4763, 5044, 5045}, + {1936, 5051, 5051}, + {4844, 5055, 5060}, + {4215, 5069, 5072}, + {1146, 5076, 5076}, + {3845, 5082, 5082}, + {4865, 5090, 5090}, + {4624, 5094, 5094}, + {4815, 5098, 5098}, + {5006, 5105, 5105}, + {4980, 5109, 5109}, + {4795, 5113, 5115}, + {5043, 5119, 5121}, + {4782, 5129, 5129}, + {3826, 5139, 5139}, + {3876, 5156, 5156}, + {3111, 5167, 5171}, + {1470, 5177, 5177}, + {4431, 5181, 5181}, + {546, 5189, 5189}, + {4225, 5193, 5193}, + {1672, 5199, 5201}, + {4207, 5205, 5209}, + {4220, 5216, 5217}, + {4658, 5224, 5225}, + {3295, 5235, 5235}, + {2436, 5239, 5239}, + {2349, 5246, 5246}, + {2175, 5250, 5250}, + {5180, 5257, 5258}, + {3161, 5263, 5263}, + {5105, 5272, 5272}, + {3552, 5282, 5282}, + {4944, 5299, 5300}, + {4130, 5312, 5313}, + {902, 5323, 5323}, + {913, 5327, 5327}, + {2987, 5333, 5334}, + {5150, 5344, 5344}, + {5249, 5348, 5348}, + {1965, 5358, 5359}, + {5330, 5364, 
5364}, + {2012, 5373, 5377}, + {712, 5384, 5386}, + {5235, 5390, 5390}, + {5044, 5398, 5399}, + {564, 5406, 5406}, + {39, 5410, 5410}, + {4642, 5422, 5425}, + {4421, 5437, 5438}, + {2347, 5449, 5449}, + {5333, 5453, 5454}, + {4136, 5458, 5459}, + {3793, 5468, 5468}, + {2243, 5480, 5480}, + {4889, 5492, 5493}, + {4295, 5504, 5504}, + {2785, 5511, 5511}, + {2377, 5518, 5518}, + {3662, 5525, 5525}, + {5097, 5529, 5530}, + {4781, 5537, 5538}, + {4697, 5547, 5548}, + {436, 5552, 5553}, + {5542, 5558, 5558}, + {3692, 5562, 5562}, + {2696, 5568, 5569}, + {4620, 5578, 5578}, + {2898, 5590, 5590}, + {5557, 5596, 5618}, + {2797, 5623, 5625}, + {2792, 5629, 5629}, + {5243, 5633, 5633}, + {5348, 5637, 5637}, + {5547, 5643, 5643}, + {4296, 5654, 5655}, + {5568, 5662, 5662}, + {3001, 5670, 5671}, + {3794, 5679, 5679}, + {4006, 5685, 5686}, + {4969, 5690, 5692}, + {687, 5704, 5704}, + {4563, 5708, 5708}, + {1723, 5738, 5738}, + {649, 5742, 5742}, + {5163, 5748, 5755}, + {3907, 5759, 5759}, + {3074, 5764, 5764}, + {5326, 5771, 5771}, + {2951, 5776, 5776}, + {5181, 5780, 5780}, + {2614, 5785, 5788}, + {4709, 5794, 5794}, + {2784, 5799, 5799}, + {5518, 5803, 5803}, + {4155, 5812, 5815}, + {921, 5819, 5819}, + {5224, 5823, 5824}, + {2853, 5830, 5836}, + {5776, 5840, 5840}, + {2955, 5844, 5845}, + {5745, 5853, 5853}, + {3291, 5857, 5857}, + {2988, 5861, 5861}, + {2647, 5865, 5865}, + {5398, 5869, 5870}, + {1085, 5874, 5875}, + {4906, 5881, 5881}, + {802, 5886, 5886}, + {5119, 5890, 5893}, + {5802, 5899, 5900}, + {3415, 5904, 5904}, + {5629, 5908, 5908}, + {3714, 5912, 5914}, + {5558, 5921, 5921}, + {2710, 5927, 5928}, + {1094, 5932, 5934}, + {2653, 5940, 5941}, + {4735, 5954, 5954}, + {5861, 5958, 5958}, + {1040, 5971, 5971}, + {5514, 5977, 5977}, + {5048, 5981, 5982}, + {5953, 5992, 5993}, + {3751, 5997, 5997}, + {4991, 6001, 6002}, + {5885, 6006, 6007}, + {5529, 6011, 6012}, + {4974, 6019, 6020}, + {5857, 6024, 6024}, + {3483, 6032, 6032}, + {3594, 6036, 6036}, + {1997, 6040, 6040}, + {5997, 6044, 6047}, + {5197, 6051, 6051}, + {1764, 6055, 6055}, + {6050, 6059, 6059}, + {5239, 6063, 6063}, + {5049, 6067, 6067}, + {5957, 6073, 6074}, + {1022, 6078, 6078}, + {3414, 6083, 6084}, + {3809, 6090, 6090}, + {4562, 6095, 6096}, + {5878, 6104, 6104}, + {594, 6108, 6109}, + {3353, 6115, 6116}, + {4992, 6120, 6121}, + {2424, 6125, 6125}, + {4484, 6130, 6130}, + {3900, 6134, 6135}, + {5793, 6139, 6141}, + {3562, 6145, 6145}, + {1438, 6152, 6153}, + {6058, 6157, 6158}, + {4411, 6162, 6163}, + {4590, 6167, 6171}, + {4748, 6175, 6175}, + {5517, 6183, 6184}, + {6095, 6191, 6192}, + {1471, 6203, 6203}, + {2643, 6209, 6210}, + {450, 6220, 6220}, + {5266, 6226, 6226}, + {2576, 6233, 6233}, + {2607, 6239, 6240}, + {5164, 6244, 6251}, + {6054, 6255, 6255}, + {1789, 6260, 6261}, + {5250, 6265, 6265}, + {6062, 6273, 6278}, + {5990, 6282, 6282}, + {3283, 6286, 6286}, + {5436, 6290, 6290}, + {6059, 6294, 6294}, + {5668, 6298, 6300}, + {3072, 6324, 6329}, + {3132, 6338, 6339}, + {3246, 6343, 6344}, + {28, 6348, 6349}, + {1503, 6353, 6355}, + {6067, 6359, 6359}, + {3384, 6364, 6364}, + {545, 6375, 6376}, + {5803, 6380, 6380}, + {5522, 6384, 6385}, + {5908, 6389, 6389}, + {2796, 6393, 6396}, + {4831, 6403, 6404}, + {6388, 6412, 6412}, + {6005, 6417, 6420}, + {4450, 6430, 6430}, + {4050, 6435, 6435}, + {5372, 6441, 6441}, + {4378, 6447, 6447}, + {6199, 6452, 6452}, + {3026, 6456, 6456}, + {2642, 6460, 6462}, + {6392, 6470, 6470}, + {6459, 6474, 6474}, + {2829, 6487, 6488}, + {2942, 6499, 6504}, + {5069, 6508, 6511}, + {5341, 6515, 
6516}, + {5853, 6521, 6525}, + {6104, 6531, 6531}, + {5759, 6535, 6538}, + {4672, 6542, 6543}, + {2443, 6550, 6550}, + {5109, 6554, 6554}, + {6494, 6558, 6560}, + {6006, 6570, 6572}, + {6424, 6576, 6580}, + {4693, 6591, 6592}, + {6439, 6596, 6597}, + {3179, 6601, 6601}, + {5299, 6606, 6607}, + {4148, 6612, 6613}, + {3774, 6617, 6617}, + {3537, 6623, 6624}, + {4975, 6628, 6629}, + {3848, 6636, 6636}, + {856, 6640, 6640}, + {5724, 6645, 6645}, + {6632, 6651, 6651}, + {4630, 6656, 6658}, + {1440, 6662, 6662}, + {4281, 6666, 6667}, + {4302, 6671, 6672}, + {2589, 6676, 6677}, + {5647, 6681, 6687}, + {6082, 6691, 6693}, + {6144, 6698, 6698}, + {6103, 6709, 6710}, + {3710, 6714, 6714}, + {4253, 6718, 6721}, + {2467, 6730, 6730}, + {4778, 6734, 6734}, + {6528, 6738, 6738}, + {4358, 6747, 6747}, + {5889, 6753, 6753}, + {5193, 6757, 6757}, + {5797, 6761, 6761}, + {3858, 6765, 6766}, + {5951, 6776, 6776}, + {6487, 6781, 6782}, + {3282, 6786, 6787}, + {4667, 6797, 6799}, + {1927, 6803, 6806}, + {6583, 6810, 6810}, + {4937, 6814, 6814}, + {6099, 6824, 6824}, + {4415, 6835, 6836}, + {6332, 6840, 6841}, + {5160, 6850, 6850}, + {4764, 6854, 6854}, + {6814, 6858, 6859}, + {3018, 6864, 6864}, + {6293, 6868, 6869}, + {6359, 6877, 6877}, + {3047, 6884, 6886}, + {5262, 6890, 6891}, + {5471, 6900, 6900}, + {3268, 6910, 6912}, + {1047, 6916, 6916}, + {5904, 6923, 6923}, + {5798, 6933, 6938}, + {4149, 6942, 6942}, + {1821, 6946, 6946}, + {3599, 6952, 6952}, + {6470, 6957, 6957}, + {5562, 6961, 6961}, + {6268, 6965, 6967}, + {6389, 6971, 6971}, + {6596, 6975, 6976}, + {6553, 6980, 6981}, + {6576, 6985, 6989}, + {1375, 6993, 6993}, + {652, 6998, 6998}, + {4876, 7002, 7003}, + {5768, 7011, 7013}, + {3973, 7017, 7017}, + {6802, 7025, 7025}, + {6955, 7034, 7036}, + {6974, 7040, 7040}, + {5944, 7044, 7044}, + {6992, 7048, 7054}, + {6872, 7059, 7059}, + {2943, 7063, 7063}, + {6923, 7067, 7067}, + {5094, 7071, 7071}, + {4873, 7075, 7075}, + {5819, 7079, 7079}, + {5945, 7085, 7085}, + {1540, 7090, 7091}, + {2090, 7095, 7095}, + {5024, 7104, 7105}, + {6900, 7109, 7109}, + {6024, 7113, 7114}, + {6000, 7118, 7120}, + {2187, 7124, 7125}, + {6760, 7129, 7130}, + {5898, 7134, 7136}, + {7032, 7144, 7144}, + {4271, 7148, 7148}, + {3706, 7152, 7152}, + {6970, 7156, 7157}, + {7088, 7161, 7163}, + {2718, 7168, 7169}, + {5674, 7175, 7175}, + {4631, 7182, 7182}, + {7070, 7188, 7189}, + {6220, 7196, 7196}, + {3458, 7201, 7202}, + {2041, 7211, 7212}, + {1454, 7216, 7216}, + {5199, 7225, 7227}, + {3529, 7234, 7234}, + {6890, 7238, 7238}, + {3815, 7242, 7243}, + {5490, 7250, 7253}, + {6554, 7257, 7263}, + {5890, 7267, 7269}, + {6877, 7273, 7273}, + {4877, 7277, 7277}, + {2502, 7285, 7285}, + {1483, 7289, 7295}, + {7210, 7304, 7308}, + {6845, 7313, 7316}, + {7219, 7320, 7320}, + {7001, 7325, 7329}, + {6853, 7333, 7334}, + {6120, 7338, 7338}, + {6606, 7342, 7343}, + {7020, 7348, 7350}, + {3509, 7354, 7354}, + {7133, 7359, 7363}, + {3434, 7371, 7374}, + {2787, 7384, 7384}, + {7044, 7388, 7388}, + {6960, 7394, 7395}, + {6676, 7399, 7400}, + {7161, 7404, 7404}, + {7285, 7417, 7418}, + {4558, 7425, 7426}, + {4828, 7430, 7430}, + {6063, 7436, 7436}, + {3597, 7442, 7442}, + {914, 7446, 7446}, + {7320, 7452, 7454}, + {7267, 7458, 7460}, + {5076, 7464, 7464}, + {7430, 7468, 7469}, + {6273, 7473, 7474}, + {7440, 7478, 7487}, + {7348, 7491, 7494}, + {1021, 7510, 7510}, + {7473, 7515, 7515}, + {2823, 7519, 7519}, + {6264, 7527, 7527}, + {7302, 7531, 7531}, + {7089, 7535, 7535}, + {7342, 7540, 7541}, + {3688, 7547, 7551}, + {3054, 7558, 7560}, + {4177, 
7566, 7567}, + {6691, 7574, 7575}, + {7156, 7585, 7586}, + {7147, 7590, 7592}, + {7407, 7598, 7598}, + {7403, 7602, 7603}, + {6868, 7607, 7607}, + {6636, 7611, 7611}, + {4805, 7617, 7617}, + {5779, 7623, 7623}, + {7063, 7627, 7627}, + {5079, 7632, 7632}, + {7377, 7637, 7637}, + {7337, 7641, 7642}, + {6738, 7655, 7655}, + {7338, 7659, 7659}, + {6541, 7669, 7671}, + {595, 7675, 7675}, + {7658, 7679, 7680}, + {7647, 7685, 7686}, + {2477, 7690, 7690}, + {5823, 7694, 7694}, + {4156, 7699, 7699}, + {5931, 7703, 7706}, + {6854, 7712, 7712}, + {4931, 7718, 7718}, + {6979, 7722, 7722}, + {5085, 7727, 7727}, + {6965, 7732, 7732}, + {7201, 7736, 7737}, + {3639, 7741, 7743}, + {7534, 7749, 7749}, + {4292, 7753, 7753}, + {3427, 7759, 7763}, + {7273, 7767, 7767}, + {940, 7778, 7778}, + {4838, 7782, 7785}, + {4216, 7790, 7792}, + {922, 7800, 7801}, + {7256, 7810, 7811}, + {7789, 7815, 7819}, + {7225, 7823, 7825}, + {7531, 7829, 7829}, + {6997, 7833, 7833}, + {7757, 7837, 7838}, + {4129, 7842, 7842}, + {7333, 7848, 7849}, + {6776, 7855, 7855}, + {7527, 7859, 7859}, + {4370, 7863, 7863}, + {4512, 7868, 7868}, + {5679, 7880, 7880}, + {3162, 7884, 7885}, + {3933, 7892, 7894}, + {7804, 7899, 7902}, + {6363, 7906, 7907}, + {7848, 7911, 7912}, + {5584, 7917, 7921}, + {874, 7926, 7926}, + {3342, 7930, 7930}, + {4507, 7935, 7937}, + {3672, 7943, 7944}, + {7911, 7948, 7949}, + {6402, 7956, 7956}, + {7940, 7960, 7960}, + {7113, 7964, 7964}, + {1073, 7968, 7968}, + {7740, 7974, 7974}, + {7601, 7978, 7982}, + {6797, 7987, 7988}, + {3528, 7994, 7995}, + {5483, 7999, 7999}, + {5717, 8011, 8011}, + {5480, 8017, 8017}, + {7770, 8023, 8030}, + {2452, 8034, 8034}, + {5282, 8047, 8047}, + {7967, 8051, 8051}, + {1128, 8058, 8066}, + {6348, 8070, 8070}, + {8055, 8077, 8077}, + {7925, 8081, 8086}, + {6810, 8090, 8090}, + {5051, 8101, 8101}, + {4696, 8109, 8110}, + {5129, 8119, 8119}, + {4449, 8123, 8123}, + {7222, 8127, 8127}, + {4649, 8131, 8134}, + {7994, 8138, 8138}, + {5954, 8148, 8148}, + {475, 8152, 8153}, + {7906, 8157, 8157}, + {7458, 8164, 8166}, + {7632, 8171, 8173}, + {3874, 8177, 8183}, + {4391, 8187, 8187}, + {561, 8191, 8191}, + {2417, 8195, 8195}, + {2357, 8204, 8204}, + {2269, 8216, 8218}, + {3968, 8222, 8222}, + {2200, 8226, 8227}, + {3453, 8247, 8247}, + {2439, 8251, 8252}, + {7175, 8257, 8257}, + {976, 8262, 8264}, + {4953, 8273, 8273}, + {4219, 8278, 8278}, + {6, 8285, 8291}, + {5703, 8295, 8296}, + {5272, 8300, 8300}, + {8037, 8304, 8304}, + {8186, 8314, 8314}, + {8304, 8318, 8318}, + {8051, 8326, 8326}, + {8318, 8330, 8330}, + {2671, 8334, 8335}, + {2662, 8339, 8339}, + {8081, 8349, 8350}, + {3328, 8356, 8356}, + {2879, 8360, 8362}, + {8050, 8370, 8371}, + {8330, 8375, 8376}, + {8375, 8386, 8386}, + {4961, 8390, 8390}, + {1017, 8403, 8405}, + {3533, 8416, 8416}, + {4555, 8422, 8422}, + {6445, 8426, 8426}, + {8169, 8432, 8432}, + {990, 8436, 8436}, + {4102, 8440, 8440}, + {7398, 8444, 8446}, + {3480, 8450, 8450}, + {6324, 8462, 8462}, + {7948, 8466, 8467}, + {5950, 8471, 8471}, + {5189, 8476, 8476}, + {4026, 8490, 8490}, + {8374, 8494, 8495}, + {4682, 8501, 8501}, + {7387, 8506, 8506}, + {8164, 8510, 8515}, + {4079, 8524, 8524}, + {8360, 8529, 8531}, + {7446, 8540, 8543}, + {7971, 8547, 8548}, + {4311, 8552, 8552}, + {5204, 8556, 8557}, + {7968, 8562, 8562}, + {7847, 8571, 8573}, + {8547, 8577, 8577}, + {5320, 8581, 8581}, + {8556, 8585, 8586}, + {8504, 8590, 8590}, + {7669, 8602, 8604}, + {5874, 8608, 8609}, + {5828, 8613, 8613}, + {7998, 8617, 8617}, + {8519, 8625, 8625}, + {7250, 8637, 8637}, + {426, 
8641, 8641}, + {8436, 8645, 8645}, + {5986, 8649, 8656}, + {8157, 8660, 8660}, + {7182, 8665, 8665}, + {8421, 8675, 8675}, + {8509, 8681, 8681}, + {5137, 8688, 8689}, + {8625, 8694, 8695}, + {5228, 8701, 8702}, + {6661, 8714, 8714}, + {1010, 8719, 8719}, + {6648, 8723, 8723}, + {3500, 8728, 8728}, + {2442, 8735, 8735}, + {8494, 8740, 8741}, + {8171, 8753, 8755}, + {7242, 8763, 8764}, + {4739, 8768, 8769}, + {7079, 8773, 8773}, + {8386, 8777, 8777}, + {8624, 8781, 8787}, + {661, 8791, 8794}, + {8631, 8801, 8801}, + {7753, 8805, 8805}, + {4783, 8809, 8810}, + {1673, 8814, 8815}, + {6623, 8819, 8819}, + {4404, 8823, 8823}, + {8089, 8827, 8828}, + {8773, 8832, 8832}, + {5394, 8836, 8836}, + {6231, 8841, 8843}, + {1015, 8852, 8853}, + {6873, 8857, 8857}, + {6289, 8865, 8865}, + {8577, 8869, 8869}, + {8114, 8873, 8875}, + {8534, 8883, 8883}, + {3007, 8887, 8888}, + {8827, 8892, 8893}, + {4788, 8897, 8900}, + {5698, 8906, 8907}, + {7690, 8911, 8911}, + {6643, 8919, 8919}, + {7206, 8923, 8924}, + {7866, 8929, 8931}, + {8880, 8942, 8942}, + {8630, 8951, 8952}, + {6027, 8958, 8958}, + {7749, 8966, 8967}, + {4932, 8972, 8973}, + {8892, 8980, 8981}, + {634, 9003, 9003}, + {8109, 9007, 9008}, + {8777, 9012, 9012}, + {3981, 9016, 9017}, + {5723, 9025, 9025}, + {7662, 9034, 9038}, + {8955, 9042, 9042}, + {8070, 9060, 9062}, + {8910, 9066, 9066}, + {5363, 9070, 9071}, + {7699, 9075, 9076}, + {8991, 9081, 9081}, + {6850, 9085, 9085}, + {5811, 9092, 9094}, + {9079, 9098, 9102}, + {6456, 9106, 9106}, + {2259, 9111, 9111}, + {4752, 9116, 9116}, + {9060, 9120, 9123}, + {8090, 9127, 9127}, + {5305, 9131, 9132}, + {8623, 9137, 9137}, + {7417, 9141, 9141}, + {6564, 9148, 9149}, + {9126, 9157, 9158}, + {4285, 9169, 9170}, + {8698, 9174, 9174}, + {8869, 9178, 9178}, + {2572, 9182, 9183}, + {6482, 9188, 9190}, + {9181, 9201, 9201}, + {2968, 9208, 9209}, + {2506, 9213, 9215}, + {9127, 9219, 9219}, + {7910, 9225, 9227}, + {5422, 9235, 9239}, + {8813, 9244, 9246}, + {9178, 9250, 9250}, + {8748, 9255, 9255}, + {7354, 9265, 9265}, + {7767, 9269, 9269}, + {7710, 9281, 9283}, + {8826, 9288, 9290}, + {861, 9295, 9295}, + {4482, 9301, 9301}, + {9264, 9305, 9306}, + {8805, 9310, 9310}, + {4995, 9314, 9314}, + {6730, 9318, 9318}, + {7457, 9328, 9328}, + {2547, 9335, 9336}, + {6298, 9340, 9343}, + {9305, 9353, 9354}, + {9269, 9358, 9358}, + {6338, 9370, 9370}, + {7289, 9376, 9379}, + {5780, 9383, 9383}, + {7607, 9387, 9387}, + {2065, 9392, 9392}, + {7238, 9396, 9396}, + {8856, 9400, 9400}, + {8069, 9412, 9413}, + {611, 9420, 9420}, + {7071, 9424, 9424}, + {3089, 9430, 9431}, + {7117, 9435, 9438}, + {1976, 9445, 9445}, + {6640, 9449, 9449}, + {5488, 9453, 9453}, + {8739, 9457, 9459}, + {5958, 9466, 9466}, + {7985, 9470, 9470}, + {8735, 9475, 9475}, + {5009, 9479, 9479}, + {8073, 9483, 9484}, + {2328, 9490, 9491}, + {9250, 9495, 9495}, + {4043, 9502, 9502}, + {7712, 9506, 9506}, + {9012, 9510, 9510}, + {9028, 9514, 9515}, + {2190, 9521, 9524}, + {9029, 9528, 9528}, + {9519, 9532, 9532}, + {9495, 9536, 9536}, + {8527, 9540, 9540}, + {2137, 9550, 9550}, + {8419, 9557, 9557}, + {9383, 9561, 9562}, + {8970, 9575, 9578}, + {8911, 9582, 9582}, + {7828, 9595, 9596}, + {6180, 9600, 9600}, + {8738, 9604, 9607}, + {7540, 9611, 9612}, + {9599, 9616, 9618}, + {9187, 9623, 9623}, + {9294, 9628, 9629}, + {4536, 9639, 9639}, + {3867, 9643, 9643}, + {6305, 9648, 9648}, + {1617, 9654, 9657}, + {5762, 9666, 9666}, + {8314, 9670, 9670}, + {9666, 9674, 9675}, + {9506, 9679, 9679}, + {9669, 9685, 9686}, + {9683, 9690, 9690}, + {8763, 9697, 9698}, + 
{7468, 9702, 9702}, + {460, 9707, 9707}, + {3115, 9712, 9712}, + {9424, 9716, 9717}, + {7359, 9721, 9724}, + {7547, 9728, 9729}, + {7151, 9733, 9738}, + {7627, 9742, 9742}, + {2822, 9747, 9747}, + {8247, 9751, 9753}, + {9550, 9758, 9758}, + {7585, 9762, 9763}, + {1002, 9767, 9767}, + {7168, 9772, 9773}, + {6941, 9777, 9780}, + {9728, 9784, 9786}, + {9770, 9792, 9796}, + {6411, 9801, 9802}, + {3689, 9806, 9808}, + {9575, 9814, 9816}, + {7025, 9820, 9821}, + {2776, 9826, 9826}, + {9806, 9830, 9830}, + {9820, 9834, 9835}, + {9800, 9839, 9847}, + {9834, 9851, 9852}, + {9829, 9856, 9862}, + {1400, 9866, 9866}, + {3197, 9870, 9871}, + {9851, 9875, 9876}, + {9742, 9883, 9884}, + {3362, 9888, 9889}, + {9883, 9893, 9893}, + {5711, 9899, 9910}, + {7806, 9915, 9915}, + {9120, 9919, 9919}, + {9715, 9925, 9934}, + {2580, 9938, 9938}, + {4907, 9942, 9944}, + {6239, 9953, 9954}, + {6961, 9963, 9963}, + {5295, 9967, 9968}, + {1915, 9972, 9973}, + {3426, 9983, 9985}, + {9875, 9994, 9995}, + {6942, 9999, 9999}, + {6621, 10005, 10005}, + {7589, 10010, 10012}, + {9286, 10020, 10020}, + {838, 10024, 10024}, + {9980, 10028, 10031}, + {9994, 10035, 10041}, + {2702, 10048, 10051}, + {2621, 10059, 10059}, + {10054, 10065, 10065}, + {8612, 10073, 10074}, + {7033, 10078, 10078}, + {916, 10082, 10082}, + {10035, 10086, 10087}, + {8613, 10097, 10097}, + {9919, 10107, 10108}, + {6133, 10114, 10115}, + {10059, 10119, 10119}, + {10065, 10126, 10127}, + {7732, 10131, 10131}, + {7155, 10135, 10136}, + {6728, 10140, 10140}, + {6162, 10144, 10145}, + {4724, 10150, 10150}, + {1665, 10154, 10154}, + {10126, 10163, 10163}, + {9783, 10168, 10168}, + {1715, 10172, 10173}, + {7152, 10177, 10182}, + {8760, 10187, 10187}, + {7829, 10191, 10191}, + {9679, 10196, 10196}, + {9369, 10201, 10201}, + {2928, 10206, 10208}, + {6951, 10214, 10217}, + {5633, 10221, 10221}, + {7199, 10225, 10225}, + {10118, 10230, 10231}, + {9999, 10235, 10236}, + {10045, 10240, 10249}, + {5565, 10256, 10256}, + {9866, 10261, 10261}, + {10163, 10268, 10268}, + {9869, 10272, 10272}, + {9789, 10276, 10283}, + {10235, 10287, 10288}, + {10214, 10298, 10299}, + {6971, 10303, 10303}, + {3346, 10307, 10307}, + {10185, 10311, 10312}, + {9993, 10318, 10320}, + {2779, 10332, 10334}, + {1726, 10338, 10338}, + {741, 10354, 10360}, + {10230, 10372, 10373}, + {10260, 10384, 10385}, + {10131, 10389, 10398}, + {6946, 10406, 10409}, + {10158, 10413, 10420}, + {10123, 10424, 10424}, + {6157, 10428, 10429}, + {4518, 10434, 10434}, + {9893, 10438, 10438}, + {9865, 10442, 10446}, + {7558, 10454, 10454}, + {10434, 10460, 10460}, + {10064, 10466, 10468}, + {2703, 10472, 10474}, + {9751, 10478, 10479}, + {6714, 10485, 10485}, + {8020, 10490, 10490}, + {10303, 10494, 10494}, + {3521, 10499, 10500}, + {9281, 10513, 10515}, + {6028, 10519, 10523}, + {9387, 10527, 10527}, + {7614, 10531, 10531}, + {3611, 10536, 10536}, + {9162, 10540, 10540}, + {10081, 10546, 10547}, + {10034, 10560, 10562}, + {6726, 10567, 10571}, + {8237, 10575, 10575}, + {10438, 10579, 10583}, + {10140, 10587, 10587}, + {5784, 10592, 10592}, + {9819, 10597, 10600}, + {10567, 10604, 10608}, + {9335, 10613, 10613}, + {8300, 10617, 10617}, + {10575, 10621, 10621}, + {9678, 10625, 10626}, + {9962, 10632, 10633}, + {10535, 10637, 10638}, + {8199, 10642, 10642}, + {10372, 10647, 10648}, + {10637, 10656, 10657}, + {10579, 10667, 10668}, + {10465, 10677, 10680}, + {6702, 10684, 10685}, + {10073, 10691, 10692}, + {4505, 10696, 10697}, + {9042, 10701, 10701}, + {6460, 10705, 10706}, + {10010, 10714, 10716}, + {10656, 10720, 
10722}, + {7282, 10727, 10729}, + {2327, 10733, 10733}, + {2491, 10740, 10741}, + {10704, 10748, 10750}, + {6465, 10754, 10754}, + {10647, 10758, 10759}, + {10424, 10763, 10763}, + {10748, 10776, 10776}, + {10546, 10780, 10781}, + {10758, 10785, 10786}, + {10287, 10790, 10797}, + {10785, 10801, 10807}, + {10240, 10811, 10826}, + {9509, 10830, 10830}, + {2579, 10836, 10838}, + {9801, 10843, 10845}, + {7555, 10849, 10850}, + {10776, 10860, 10865}, + {8023, 10869, 10869}, + {10046, 10876, 10884}, + {10253, 10888, 10892}, + {9941, 10897, 10897}, + {7898, 10901, 10905}, + {6725, 10909, 10913}, + {10757, 10921, 10923}, + {10160, 10931, 10931}, + {10916, 10935, 10942}, + {10261, 10946, 10946}, + {10318, 10952, 10954}, + {5911, 10959, 10961}, + {10801, 10965, 10966}, + {10946, 10970, 10977}, + {10592, 10982, 10984}, + {9913, 10988, 10990}, + {8510, 10994, 10996}, + {9419, 11000, 11001}, + {6765, 11006, 11007}, + {10725, 11011, 11011}, + {5537, 11017, 11019}, + {9208, 11024, 11025}, + {5850, 11030, 11030}, + {9610, 11034, 11036}, + {8846, 11041, 11047}, + {9697, 11051, 11051}, + {1622, 11055, 11058}, + {2370, 11062, 11062}, + {8393, 11067, 11067}, + {9756, 11071, 11071}, + {10172, 11076, 11076}, + {27, 11081, 11081}, + {7357, 11087, 11092}, + {8151, 11104, 11106}, + {6115, 11110, 11110}, + {10667, 11114, 11115}, + {11099, 11121, 11123}, + {10705, 11127, 11127}, + {8938, 11131, 11131}, + {11114, 11135, 11136}, + {1390, 11140, 11141}, + {10964, 11146, 11148}, + {11140, 11152, 11155}, + {9813, 11159, 11166}, + {624, 11171, 11172}, + {3118, 11177, 11179}, + {11029, 11184, 11186}, + {10186, 11190, 11190}, + {10306, 11196, 11196}, + {8665, 11201, 11201}, + {7382, 11205, 11205}, + {1100, 11210, 11210}, + {2337, 11216, 11217}, + {1609, 11221, 11223}, + {5763, 11228, 11229}, + {5220, 11233, 11233}, + {11061, 11241, 11241}, + {10617, 11246, 11246}, + {11190, 11250, 11251}, + {10144, 11255, 11256}, + {11232, 11260, 11260}, + {857, 11264, 11265}, + {10994, 11269, 11271}, + {3879, 11280, 11281}, + {11184, 11287, 11289}, + {9611, 11293, 11295}, + {11250, 11299, 11299}, + {4495, 11304, 11304}, + {7574, 11308, 11309}, + {9814, 11315, 11317}, + {1713, 11321, 11324}, + {1905, 11328, 11328}, + {8745, 11335, 11340}, + {8883, 11351, 11351}, + {8119, 11358, 11358}, + {1842, 11363, 11364}, + {11237, 11368, 11368}, + {8814, 11373, 11374}, + {5684, 11378, 11378}, + {11011, 11382, 11382}, + {6520, 11389, 11389}, + {11183, 11393, 11396}, + {1790, 11404, 11404}, + {9536, 11408, 11408}, + {11298, 11418, 11419}, + {3929, 11425, 11425}, + {5588, 11429, 11429}, + {8476, 11436, 11436}, + {4096, 11440, 11442}, + {11084, 11446, 11454}, + {10603, 11458, 11463}, + {7332, 11472, 11474}, + {7611, 11483, 11486}, + {4836, 11490, 11491}, + {10024, 11495, 11495}, + {4917, 11501, 11506}, + {6486, 11510, 11512}, + {11269, 11516, 11518}, + {3603, 11522, 11525}, + {11126, 11535, 11535}, + {11418, 11539, 11541}, + {11408, 11545, 11545}, + {9021, 11549, 11552}, + {6745, 11557, 11557}, + {5118, 11561, 11564}, + {7590, 11568, 11569}, + {4426, 11573, 11578}, + {9790, 11582, 11583}, + {6447, 11587, 11587}, + {10229, 11591, 11594}, + {10457, 11598, 11598}, + {10168, 11604, 11604}, + {10543, 11608, 11608}, + {7404, 11612, 11612}, + {11127, 11616, 11616}, + {3337, 11620, 11620}, + {11501, 11624, 11628}, + {4543, 11633, 11635}, + {8449, 11642, 11642}, + {4943, 11646, 11648}, + {10526, 11652, 11654}, + {11620, 11659, 11659}, + {8927, 11664, 11669}, + {532, 11673, 11673}, + {10513, 11677, 11679}, + {10428, 11683, 11683}, + {10999, 11689, 11690}, + {9469, 
11695, 11695}, + {3606, 11699, 11699}, + {9560, 11708, 11709}, + {1564, 11714, 11714}, + {10527, 11718, 11718}, + {3071, 11723, 11726}, + {11590, 11731, 11732}, + {6605, 11737, 11737}, + {11624, 11741, 11745}, + {7822, 11749, 11752}, + {5269, 11757, 11758}, + {1339, 11767, 11767}, + {1363, 11771, 11773}, + {3704, 11777, 11777}, + {10952, 11781, 11783}, + {6764, 11793, 11795}, + {8675, 11800, 11800}, + {9963, 11804, 11804}, + {11573, 11808, 11809}, + {9548, 11813, 11813}, + {11591, 11817, 11818}, + {11446, 11822, 11822}, + {9224, 11828, 11828}, + {3158, 11836, 11836}, + {10830, 11840, 11840}, + {7234, 11846, 11846}, + {11299, 11850, 11850}, + {11544, 11854, 11855}, + {11498, 11859, 11859}, + {10993, 11865, 11868}, + {9720, 11872, 11878}, + {10489, 11882, 11890}, + {11712, 11898, 11904}, + {11516, 11908, 11910}, + {11568, 11914, 11915}, + {10177, 11919, 11924}, + {11363, 11928, 11929}, + {10494, 11933, 11933}, + {9870, 11937, 11938}, + {9427, 11942, 11942}, + {11481, 11949, 11949}, + {6030, 11955, 11957}, + {11718, 11961, 11961}, + {10531, 11965, 11983}, + {5126, 11987, 11987}, + {7515, 11991, 11991}, + {10646, 11996, 11997}, + {2947, 12001, 12001}, + {9582, 12009, 12010}, + {6202, 12017, 12018}, + {11714, 12022, 12022}, + {9235, 12033, 12037}, + {9721, 12041, 12044}, + {11932, 12051, 12052}, + {12040, 12056, 12056}, + {12051, 12060, 12060}, + {11601, 12066, 12066}, + {8426, 12070, 12070}, + {4053, 12077, 12077}, + {4262, 12081, 12081}, + {9761, 12086, 12088}, + {11582, 12092, 12093}, + {10965, 12097, 12098}, + {11803, 12103, 12104}, + {11933, 12108, 12109}, + {10688, 12117, 12117}, + {12107, 12125, 12126}, + {6774, 12130, 12132}, + {6286, 12137, 12137}, + {9543, 12141, 12141}, + {12097, 12145, 12146}, + {10790, 12150, 12150}, + {10125, 12154, 12156}, + {12125, 12164, 12164}, + {12064, 12168, 12172}, + {10811, 12178, 12188}, + {12092, 12192, 12193}, + {10058, 12197, 12198}, + {11611, 12211, 12212}, + {3459, 12216, 12216}, + {10291, 12225, 12228}, + {12191, 12232, 12234}, + {12145, 12238, 12238}, + {12001, 12242, 12250}, + {3840, 12255, 12255}, + {12216, 12259, 12259}, + {674, 12272, 12272}, + {12141, 12276, 12276}, + {10766, 12280, 12280}, + {11545, 12284, 12284}, + {6496, 12290, 12290}, + {11381, 12294, 12295}, + {603, 12302, 12303}, + {12276, 12308, 12308}, + {11850, 12313, 12314}, + {565, 12319, 12319}, + {9351, 12324, 12324}, + {11822, 12328, 12328}, + {2691, 12333, 12334}, + {11840, 12338, 12338}, + {11070, 12343, 12343}, + {9510, 12347, 12347}, + {11024, 12352, 12353}, + {7173, 12359, 12359}, + {517, 12363, 12363}, + {6311, 12367, 12368}, + {11367, 12372, 12373}, + {12008, 12377, 12377}, + {11372, 12382, 12384}, + {11358, 12391, 12392}, + {11382, 12396, 12396}, + {6882, 12400, 12401}, + {11246, 12405, 12405}, + {8359, 12409, 12412}, + {10154, 12418, 12418}, + {12016, 12425, 12426}, + {8972, 12434, 12435}, + {10478, 12439, 12440}, + {12395, 12449, 12449}, + {11612, 12454, 12454}, + {12347, 12458, 12458}, + {10700, 12466, 12467}, + {3637, 12471, 12476}, + {1042, 12480, 12481}, + {6747, 12488, 12488}, + {12396, 12492, 12493}, + {9420, 12497, 12497}, + {11285, 12501, 12510}, + {4470, 12515, 12515}, + {9374, 12519, 12519}, + {11293, 12528, 12528}, + {2058, 12534, 12535}, + {6521, 12539, 12539}, + {12492, 12543, 12543}, + {3043, 12547, 12547}, + {2982, 12551, 12553}, + {11030, 12557, 12563}, + {7636, 12568, 12568}, + {9639, 12572, 12572}, + {12543, 12576, 12576}, + {5989, 12580, 12583}, + {11051, 12587, 12587}, + {1061, 12592, 12594}, + {12313, 12599, 12601}, + {11846, 12605, 12605}, + 
{12576, 12609, 12609}, + {11040, 12618, 12625}, + {12479, 12629, 12629}, + {6903, 12633, 12633}, + {12322, 12639, 12639}, + {12253, 12643, 12645}, + {5594, 12651, 12651}, + {12522, 12655, 12655}, + {11703, 12659, 12659}, + {1377, 12665, 12665}, + {8022, 12669, 12669}, + {12280, 12674, 12674}, + {9023, 12680, 12681}, + {12328, 12685, 12685}, + {3085, 12689, 12693}, + {4700, 12698, 12698}, + {10224, 12702, 12702}, + {8781, 12706, 12706}, + {1651, 12710, 12710}, + {12458, 12714, 12714}, + {12005, 12718, 12721}, + {11908, 12725, 12726}, + {8202, 12733, 12733}, + {11708, 12739, 12740}, + {12599, 12744, 12745}, + {12284, 12749, 12749}, + {5285, 12756, 12756}, + {12055, 12775, 12777}, + {6919, 12782, 12782}, + {12242, 12786, 12786}, + {12009, 12790, 12790}, + {9628, 12794, 12796}, + {11354, 12801, 12802}, + {10225, 12806, 12807}, + {579, 12813, 12813}, + {8935, 12817, 12822}, + {8753, 12827, 12829}, + {11006, 12835, 12835}, + {858, 12841, 12845}, + {476, 12849, 12849}, + {7667, 12854, 12854}, + {12760, 12860, 12871}, + {11677, 12875, 12877}, + {12714, 12881, 12881}, + {12731, 12885, 12890}, + {7108, 12894, 12896}, + {1165, 12900, 12900}, + {4021, 12906, 12906}, + {10829, 12910, 12911}, + {12331, 12915, 12915}, + {8887, 12919, 12921}, + {11639, 12925, 12925}, + {7964, 12929, 12929}, + {12528, 12937, 12937}, + {8148, 12941, 12941}, + {12770, 12948, 12950}, + {12609, 12954, 12954}, + {12685, 12958, 12958}, + {2803, 12962, 12962}, + {9561, 12966, 12966}, + {6671, 12972, 12973}, + {12056, 12977, 12977}, + {6380, 12981, 12981}, + {12048, 12985, 12985}, + {11961, 12989, 12993}, + {3368, 12997, 12999}, + {6634, 13004, 13004}, + {6775, 13009, 13010}, + {12136, 13014, 13019}, + {10341, 13023, 13023}, + {13002, 13027, 13027}, + {10587, 13031, 13031}, + {10307, 13035, 13035}, + {12736, 13039, 13039}, + {12744, 13043, 13044}, + {6175, 13048, 13048}, + {9702, 13053, 13054}, + {662, 13059, 13061}, + {12718, 13065, 13068}, + {12893, 13072, 13075}, + {8299, 13086, 13091}, + {12604, 13095, 13096}, + {12848, 13100, 13101}, + {12749, 13105, 13105}, + {12526, 13109, 13114}, + {9173, 13122, 13122}, + {12769, 13128, 13128}, + {13038, 13132, 13132}, + {12725, 13136, 13137}, + {12639, 13146, 13146}, + {9711, 13150, 13151}, + {12137, 13155, 13155}, + {13039, 13159, 13159}, + {4681, 13163, 13164}, + {12954, 13168, 13168}, + {13158, 13175, 13176}, + {13105, 13180, 13180}, + {10754, 13184, 13184}, + {13167, 13188, 13188}, + {12658, 13192, 13192}, + {4294, 13199, 13200}, + {11682, 13204, 13205}, + {11695, 13209, 13209}, + {11076, 13214, 13214}, + {12232, 13218, 13218}, + {9399, 13223, 13224}, + {12880, 13228, 13229}, + {13048, 13234, 13234}, + {9701, 13238, 13239}, + {13209, 13243, 13243}, + {3658, 13248, 13248}, + {3698, 13252, 13254}, + {12237, 13260, 13260}, + {8872, 13266, 13266}, + {12957, 13272, 13273}, + {1393, 13281, 13281}, + {2013, 13285, 13288}, + {4244, 13296, 13299}, + {9428, 13303, 13303}, + {12702, 13307, 13307}, + {13078, 13311, 13311}, + {6071, 13315, 13315}, + {3061, 13319, 13319}, + {2051, 13324, 13324}, + {11560, 13328, 13331}, + {6584, 13336, 13336}, + {8482, 13340, 13340}, + {5331, 13344, 13344}, + {4171, 13348, 13348}, + {8501, 13352, 13352}, + {9219, 13356, 13356}, + {9473, 13360, 13363}, + {12881, 13367, 13367}, + {13065, 13371, 13375}, + {2979, 13379, 13384}, + {1518, 13388, 13388}, + {11177, 13392, 13392}, + {9457, 13398, 13398}, + {12293, 13407, 13410}, + {3697, 13414, 13417}, + {10338, 13425, 13425}, + {13367, 13429, 13429}, + {11074, 13433, 13437}, + {4201, 13441, 13443}, + {1812, 13447, 13448}, 
+ {13360, 13452, 13456}, + {13188, 13463, 13463}, + {9732, 13470, 13470}, + {11332, 13477, 13477}, + {9918, 13487, 13487}, + {6337, 13497, 13497}, + {13429, 13501, 13501}, + {11413, 13505, 13505}, + {4685, 13512, 13513}, + {13136, 13517, 13519}, + {7416, 13528, 13530}, + {12929, 13534, 13534}, + {11110, 13539, 13539}, + {11521, 13543, 13543}, + {12825, 13553, 13553}, + {13447, 13557, 13558}, + {12299, 13562, 13563}, + {9003, 13570, 13570}, + {12500, 13577, 13577}, + {13501, 13581, 13581}, + {9392, 13586, 13586}, + {12454, 13590, 13590}, + {6189, 13595, 13595}, + {13053, 13599, 13599}, + {11881, 13604, 13604}, + {13159, 13608, 13608}, + {4894, 13612, 13612}, + {13221, 13621, 13621}, + {8950, 13625, 13625}, + {13533, 13629, 13629}, + {9633, 13633, 13633}, + {7892, 13637, 13639}, + {13581, 13643, 13643}, + {13616, 13647, 13649}, + {12794, 13653, 13654}, + {8919, 13659, 13659}, + {9674, 13663, 13663}, + {13577, 13668, 13668}, + {12966, 13672, 13672}, + {12659, 13676, 13683}, + {6124, 13688, 13688}, + {9225, 13693, 13695}, + {11833, 13702, 13702}, + {12904, 13709, 13717}, + {13647, 13721, 13722}, + {11687, 13726, 13727}, + {12434, 13731, 13732}, + {12689, 13736, 13742}, + {13168, 13746, 13746}, + {6151, 13751, 13752}, + {11821, 13756, 13757}, + {6467, 13764, 13764}, + {5730, 13769, 13769}, + {5136, 13780, 13780}, + {724, 13784, 13785}, + {13517, 13789, 13791}, + {640, 13795, 13796}, + {7721, 13800, 13802}, + {11121, 13806, 13807}, + {5791, 13811, 13815}, + {12894, 13819, 13819}, + {11100, 13824, 13824}, + {7011, 13830, 13830}, + {7129, 13834, 13837}, + {13833, 13841, 13841}, + {11276, 13847, 13847}, + {13621, 13853, 13853}, + {13589, 13862, 13863}, + {12989, 13867, 13867}, + {12789, 13871, 13871}, + {1239, 13875, 13875}, + {4675, 13879, 13881}, + {4686, 13885, 13885}, + {707, 13889, 13889}, + {5449, 13897, 13898}, + {13867, 13902, 13903}, + {10613, 13908, 13908}, + {13789, 13912, 13914}, + {4451, 13918, 13919}, + {9200, 13924, 13924}, + {2011, 13930, 13930}, + {11433, 13934, 13936}, + {4695, 13942, 13943}, + {9435, 13948, 13951}, + {13688, 13955, 13957}, + {11694, 13961, 13962}, + {5712, 13966, 13966}, + {5991, 13970, 13972}, + {13477, 13976, 13976}, + {10213, 13987, 13987}, + {11839, 13991, 13993}, + {12272, 13997, 13997}, + {6206, 14001, 14001}, + {13179, 14006, 14007}, + {2939, 14011, 14011}, + {12972, 14016, 14017}, + {13918, 14021, 14022}, + {7436, 14026, 14027}, + {7678, 14032, 14034}, + {13586, 14040, 14040}, + {13347, 14044, 14044}, + {13109, 14048, 14051}, + {9244, 14055, 14057}, + {13315, 14061, 14061}, + {13276, 14067, 14067}, + {11435, 14073, 14074}, + {13853, 14078, 14078}, + {13452, 14082, 14082}, + {14044, 14087, 14087}, + {4440, 14091, 14095}, + {4479, 14100, 14103}, + {9395, 14107, 14109}, + {6834, 14119, 14119}, + {10458, 14123, 14124}, + {1429, 14129, 14129}, + {8443, 14135, 14135}, + {10365, 14140, 14140}, + {5267, 14145, 14145}, + {11834, 14151, 14153}, +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go index 15af18d0a5..0cf5e379c4 100644 --- a/vendor/github.com/golang/snappy/snappy.go +++ b/vendor/github.com/golang/snappy/snappy.go @@ -32,7 +32,10 @@ Lempel-Ziv compression algorithms. In particular: - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). The length is 1 + m. The offset is the little-endian unsigned integer denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer supported. 
+ - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. */ const ( tagLiteral = 0x00 @@ -46,9 +49,25 @@ const ( chunkHeaderSize = 4 magicChunk = "\xff\x06\x00\x00" + magicBody magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 bytes". - maxUncompressedChunkLen = 65536 + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. + maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize ) const ( diff --git a/vendor/github.com/golang/snappy/snappy_test.go b/vendor/github.com/golang/snappy/snappy_test.go new file mode 100644 index 0000000000..2712710df5 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy_test.go @@ -0,0 +1,1353 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "bytes" + "encoding/binary" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" +) + +var ( + download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + testdataDir = flag.String("testdataDir", "testdata", "Directory containing the test data") + benchdataDir = flag.String("benchdataDir", "testdata/bench", "Directory containing the benchmark data") +) + +// goEncoderShouldMatchCppEncoder is whether to test that the algorithm used by +// Go's encoder matches byte-for-byte what the C++ snappy encoder produces, on +// this GOARCH. There is more than one valid encoding of any given input, and +// there is more than one good algorithm along the frontier of trading off +// throughput for output size. Nonetheless, we presume that the C++ encoder's +// algorithm is a good one and has been tested on a wide range of inputs, so +// matching that exactly should mean that the Go encoder's algorithm is also +// good, without needing to gather our own corpus of test data. +// +// The exact algorithm used by the C++ code is potentially endian dependent, as +// it puns a byte pointer to a uint32 pointer to load, hash and compare 4 bytes +// at a time. The Go implementation is endian agnostic, in that its output is +// the same (as little-endian C++ code), regardless of the CPU's endianness. +// +// Thus, when comparing Go's output to C++ output generated beforehand, such as +// the "testdata/pi.txt.rawsnappy" file generated by C++ code on a little- +// endian system, we can run that test regardless of the runtime.GOARCH value. 
+// +// When comparing Go's output to dynamically generated C++ output, i.e. the +// result of fork/exec'ing a C++ program, we can run that test only on +// little-endian systems, because the C++ output might be different on +// big-endian systems. The runtime package doesn't export endianness per se, +// but we can restrict this match-C++ test to common little-endian systems. +const goEncoderShouldMatchCppEncoder = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "arm" + +func TestMaxEncodedLenOfMaxBlockSize(t *testing.T) { + got := maxEncodedLenOfMaxBlockSize + want := MaxEncodedLen(maxBlockSize) + if got != want { + t.Fatalf("got %d, want %d", got, want) + } +} + +func cmp(a, b []byte) error { + if bytes.Equal(a, b) { + return nil + } + if len(a) != len(b) { + return fmt.Errorf("got %d bytes, want %d", len(a), len(b)) + } + for i := range a { + if a[i] != b[i] { + return fmt.Errorf("byte #%d: got 0x%02x, want 0x%02x", i, a[i], b[i]) + } + } + return nil +} + +func roundtrip(b, ebuf, dbuf []byte) error { + d, err := Decode(dbuf, Encode(ebuf, b)) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if err := cmp(d, b); err != nil { + return fmt.Errorf("roundtrip mismatch: %v", err) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rng := rand.New(rand.NewSource(1)) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(rng.Intn(256)) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestInvalidVarint(t *testing.T) { + testCases := []struct { + desc string + input string + }{{ + "invalid varint, final byte has continuation bit set", + "\xff", + }, { + "invalid varint, value overflows uint64", + "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00", + }, { + // https://github.com/google/snappy/blob/master/format_description.txt + // says that "the stream starts with the uncompressed length [as a + // varint] (up to a maximum of 2^32 - 1)". 
+ "valid varint (as uint64), but value overflows uint32", + "\x80\x80\x80\x80\x10", + }} + + for _, tc := range testCases { + input := []byte(tc.input) + if _, err := DecodedLen(input); err != ErrCorrupt { + t.Errorf("%s: DecodedLen: got %v, want ErrCorrupt", tc.desc, err) + } + if _, err := Decode(nil, input); err != ErrCorrupt { + t.Errorf("%s: Decode: got %v, want ErrCorrupt", tc.desc, err) + } + } +} + +func TestDecode(t *testing.T) { + lit40Bytes := make([]byte, 40) + for i := range lit40Bytes { + lit40Bytes[i] = byte(i) + } + lit40 := string(lit40Bytes) + + testCases := []struct { + desc string + input string + want string + wantErr error + }{{ + `decodedLen=0; valid input`, + "\x00", + "", + nil, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; valid input`, + "\x03" + "\x08\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=2; tagLiteral, 0-byte length; length=3; not enough dst bytes`, + "\x02" + "\x08\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 0-byte length; length=3; not enough src bytes`, + "\x03" + "\x08\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=40; tagLiteral, 0-byte length; length=40; valid input`, + "\x28" + "\x9c" + lit40, + lit40, + nil, + }, { + `decodedLen=1; tagLiteral, 1-byte length; not enough length bytes`, + "\x01" + "\xf0", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 1-byte length; length=3; valid input`, + "\x03" + "\xf0\x02\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 2-byte length; not enough length bytes`, + "\x01" + "\xf4\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 2-byte length; length=3; valid input`, + "\x03" + "\xf4\x02\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 3-byte length; not enough length bytes`, + "\x01" + "\xf8\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 3-byte length; length=3; valid input`, + "\x03" + "\xf8\x02\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=1; tagLiteral, 4-byte length; not enough length bytes`, + "\x01" + "\xfc\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=1; tagLiteral, 4-byte length; length=3; not enough dst bytes`, + "\x01" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral, 4-byte length; length=3; not enough src bytes`, + "\x04" + "\xfc\x02\x00\x00\x00\xff", + "", + ErrCorrupt, + }, { + `decodedLen=3; tagLiteral, 4-byte length; length=3; valid input`, + "\x03" + "\xfc\x02\x00\x00\x00\xff\xff\xff", + "\xff\xff\xff", + nil, + }, { + `decodedLen=4; tagCopy1, 1 extra length|offset byte; not enough extra bytes`, + "\x04" + "\x01", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy2, 2 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x02\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagCopy4, 4 extra length|offset bytes; not enough extra bytes`, + "\x04" + "\x03\x00\x00\x00", + "", + ErrCorrupt, + }, { + `decodedLen=4; tagLiteral (4 bytes "abcd"); valid input`, + "\x04" + "\x0cabcd", + "abcd", + nil, + }, { + `decodedLen=13; tagLiteral (4 bytes "abcd"); tagCopy1; length=9 offset=4; valid input`, + "\x0d" + "\x0cabcd" + "\x15\x04", + "abcdabcdabcda", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; valid input`, + "\x08" + "\x0cabcd" + "\x01\x04", + "abcdabcd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=2; valid input`, + "\x08" + "\x0cabcd" + "\x01\x02", + "abcdcdcd", + nil, + }, { + 
`decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=1; valid input`, + "\x08" + "\x0cabcd" + "\x01\x01", + "abcddddd", + nil, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=0; zero offset`, + "\x08" + "\x0cabcd" + "\x01\x00", + "", + ErrCorrupt, + }, { + `decodedLen=9; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; inconsistent dLen`, + "\x09" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=8; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=5; offset too large`, + "\x08" + "\x0cabcd" + "\x01\x05", + "", + ErrCorrupt, + }, { + `decodedLen=7; tagLiteral (4 bytes "abcd"); tagCopy1; length=4 offset=4; length too large`, + "\x07" + "\x0cabcd" + "\x01\x04", + "", + ErrCorrupt, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy2; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x06\x03\x00", + "abcdbc", + nil, + }, { + `decodedLen=6; tagLiteral (4 bytes "abcd"); tagCopy4; length=2 offset=3; valid input`, + "\x06" + "\x0cabcd" + "\x07\x03\x00\x00\x00", + "abcdbc", + nil, + }} + + const ( + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to dBuf + // to check that Decode does not write bytes past the end of + // dBuf[:dLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + + var dBuf [100]byte +loop: + for i, tc := range testCases { + input := []byte(tc.input) + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("#%d (%s): input shouldn't contain %#02x\ninput: % x", i, tc.desc, x, input) + continue loop + } + } + + dLen, n := binary.Uvarint(input) + if n <= 0 { + t.Errorf("#%d (%s): invalid varint-encoded dLen", i, tc.desc) + continue + } + if dLen > uint64(len(dBuf)) { + t.Errorf("#%d (%s): dLen %d is too large", i, tc.desc, dLen) + continue + } + + for j := range dBuf { + dBuf[j] = byte(notPresentBase + j%notPresentLen) + } + g, gotErr := Decode(dBuf[:], input) + if got := string(g); got != tc.want || gotErr != tc.wantErr { + t.Errorf("#%d (%s):\ngot %q, %v\nwant %q, %v", + i, tc.desc, got, gotErr, tc.want, tc.wantErr) + continue + } + for j, x := range dBuf { + if uint64(j) < dLen { + continue + } + if w := byte(notPresentBase + j%notPresentLen); x != w { + t.Errorf("#%d (%s): Decode overrun: dBuf[%d] was modified: got %#02x, want %#02x\ndBuf: % x", + i, tc.desc, j, x, w, dBuf) + continue loop + } + } + } +} + +func TestDecodeCopy4(t *testing.T) { + dots := strings.Repeat(".", 65536) + + input := strings.Join([]string{ + "\x89\x80\x04", // decodedLen = 65545. + "\x0cpqrs", // 4-byte literal "pqrs". + "\xf4\xff\xff" + dots, // 65536-byte literal dots. + "\x13\x04\x00\x01\x00", // tagCopy4; length=5 offset=65540. + }, "") + + gotBytes, err := Decode(nil, []byte(input)) + if err != nil { + t.Fatal(err) + } + got := string(gotBytes) + want := "pqrs" + dots + "pqrs." + if len(got) != len(want) { + t.Fatalf("got %d bytes, want %d", len(got), len(want)) + } + if got != want { + for i := 0; i < len(got); i++ { + if g, w := got[i], want[i]; g != w { + t.Fatalf("byte #%d: got %#02x, want %#02x", i, g, w) + } + } + } +} + +// TestDecodeLengthOffset tests decoding an encoding of the form literal + +// copy-length-offset + literal. 
For example: "abcdefghijkl" + "efghij" + "AB". +func TestDecodeLengthOffset(t *testing.T) { + const ( + prefix = "abcdefghijklmnopqr" + suffix = "ABCDEFGHIJKLMNOPQR" + + // notPresentXxx defines a range of byte values [0xa0, 0xc5) that are + // not present in either the input or the output. It is written to + // gotBuf to check that Decode does not write bytes past the end of + // gotBuf[:totalLen]. + // + // The magic number 37 was chosen because it is prime. A more 'natural' + // number like 32 might lead to a false negative if, for example, a + // byte was incorrectly copied 4*8 bytes later. + notPresentBase = 0xa0 + notPresentLen = 37 + ) + var gotBuf, wantBuf, inputBuf [128]byte + for length := 1; length <= 18; length++ { + for offset := 1; offset <= 18; offset++ { + loop: + for suffixLen := 0; suffixLen <= 18; suffixLen++ { + totalLen := len(prefix) + length + suffixLen + + inputLen := binary.PutUvarint(inputBuf[:], uint64(totalLen)) + inputBuf[inputLen] = tagLiteral + 4*byte(len(prefix)-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], prefix) + inputBuf[inputLen+0] = tagCopy2 + 4*byte(length-1) + inputBuf[inputLen+1] = byte(offset) + inputBuf[inputLen+2] = 0x00 + inputLen += 3 + if suffixLen > 0 { + inputBuf[inputLen] = tagLiteral + 4*byte(suffixLen-1) + inputLen++ + inputLen += copy(inputBuf[inputLen:], suffix[:suffixLen]) + } + input := inputBuf[:inputLen] + + for i := range gotBuf { + gotBuf[i] = byte(notPresentBase + i%notPresentLen) + } + got, err := Decode(gotBuf[:], input) + if err != nil { + t.Errorf("length=%d, offset=%d; suffixLen=%d: %v", length, offset, suffixLen, err) + continue + } + + wantLen := 0 + wantLen += copy(wantBuf[wantLen:], prefix) + for i := 0; i < length; i++ { + wantBuf[wantLen] = wantBuf[wantLen-offset] + wantLen++ + } + wantLen += copy(wantBuf[wantLen:], suffix[:suffixLen]) + want := wantBuf[:wantLen] + + for _, x := range input { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: input shouldn't contain %#02x\ninput: % x", + length, offset, suffixLen, x, input) + continue loop + } + } + for i, x := range gotBuf { + if i < totalLen { + continue + } + if w := byte(notPresentBase + i%notPresentLen); x != w { + t.Errorf("length=%d, offset=%d; suffixLen=%d; totalLen=%d: "+ + "Decode overrun: gotBuf[%d] was modified: got %#02x, want %#02x\ngotBuf: % x", + length, offset, suffixLen, totalLen, i, x, w, gotBuf) + continue loop + } + } + for _, x := range want { + if notPresentBase <= x && x < notPresentBase+notPresentLen { + t.Errorf("length=%d, offset=%d; suffixLen=%d: want shouldn't contain %#02x\nwant: % x", + length, offset, suffixLen, x, want) + continue loop + } + } + + if !bytes.Equal(got, want) { + t.Errorf("length=%d, offset=%d; suffixLen=%d:\ninput % x\ngot % x\nwant % x", + length, offset, suffixLen, input, got, want) + continue + } + } + } + } +} + +const ( + goldenText = "Mark.Twain-Tom.Sawyer.txt" + goldenCompressed = goldenText + ".rawsnappy" +) + +func TestDecodeGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got, err := Decode(nil, src) + if err != nil { + t.Fatalf("Decode: %v", err) + } + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestEncodeGoldenInput(t *testing.T) { + tDir := 
filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + got := Encode(nil, src) + want, err := ioutil.ReadFile(filepath.Join(tDir, goldenCompressed)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + if err := cmp(got, want); err != nil { + t.Fatal(err) + } +} + +func TestExtendMatchGoldenInput(t *testing.T) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + t.Fatalf("ReadFile: %v", err) + } + for i, tc := range extendMatchGoldenTestCases { + got := extendMatch(src, tc.i, tc.j) + if got != tc.want { + t.Errorf("test #%d: i, j = %5d, %5d: got %5d (= j + %6d), want %5d (= j + %6d)", + i, tc.i, tc.j, got, got-tc.j, tc.want, tc.want-tc.j) + } + } +} + +func TestExtendMatch(t *testing.T) { + // ref is a simple, reference implementation of extendMatch. + ref := func(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j + } + + nums := []int{0, 1, 2, 7, 8, 9, 29, 30, 31, 32, 33, 34, 38, 39, 40} + for yIndex := 40; yIndex > 30; yIndex-- { + xxx := bytes.Repeat([]byte("x"), 40) + if yIndex < len(xxx) { + xxx[yIndex] = 'y' + } + for _, i := range nums { + for _, j := range nums { + if i >= j { + continue + } + got := extendMatch(xxx, i, j) + want := ref(xxx, i, j) + if got != want { + t.Errorf("yIndex=%d, i=%d, j=%d: got %d, want %d", yIndex, i, j, got, want) + } + } + } + } +} + +const snappytoolCmdName = "cmd/snappytool/snappytool" + +func skipTestSameEncodingAsCpp() (msg string) { + if !goEncoderShouldMatchCppEncoder { + return fmt.Sprintf("skipping testing that the encoding is byte-for-byte identical to C++: GOARCH=%s", runtime.GOARCH) + } + if _, err := os.Stat(snappytoolCmdName); err != nil { + return fmt.Sprintf("could not find snappytool: %v", err) + } + return "" +} + +func runTestSameEncodingAsCpp(src []byte) error { + got := Encode(nil, src) + + cmd := exec.Command(snappytoolCmdName, "-e") + cmd.Stdin = bytes.NewReader(src) + want, err := cmd.Output() + if err != nil { + return fmt.Errorf("could not run snappytool: %v", err) + } + return cmp(got, want) +} + +func TestSameEncodingAsCppShortCopies(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + src := bytes.Repeat([]byte{'a'}, 20) + for i := 0; i <= len(src); i++ { + if err := runTestSameEncodingAsCpp(src[:i]); err != nil { + t.Errorf("i=%d: %v", i, err) + } + } +} + +func TestSameEncodingAsCppLongFiles(t *testing.T) { + if msg := skipTestSameEncodingAsCpp(); msg != "" { + t.Skip(msg) + } + bDir := filepath.FromSlash(*benchdataDir) + failed := false + for i, tf := range testFiles { + if err := downloadBenchmarkFiles(t, tf.filename); err != nil { + t.Fatalf("failed to download testdata: %s", err) + } + data := readFile(t, filepath.Join(bDir, tf.filename)) + if n := tf.sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if err := runTestSameEncodingAsCpp(data); err != nil { + t.Errorf("i=%d: %v", i, err) + failed = true + } + } + if failed { + t.Errorf("was the snappytool program built against the C++ snappy library version " + + "d53de187 or later, committed on 2016-04-05? See " + + "https://github.com/google/snappy/commit/d53de18799418e113e44444252a39b12a0e4e0cc") + } +} + +// TestSlowForwardCopyOverrun tests the "expand the pattern" algorithm +// described in decode_amd64.s and its claim of a 10 byte overrun worst case.
+func TestSlowForwardCopyOverrun(t *testing.T) { + const base = 100 + + for length := 1; length < 18; length++ { + for offset := 1; offset < 18; offset++ { + highWaterMark := base + d := base + l := length + o := offset + + // makeOffsetAtLeast8 + for o < 8 { + if end := d + 8; highWaterMark < end { + highWaterMark = end + } + l -= o + d += o + o += o + } + + // fixUpSlowForwardCopy + a := d + d += l + + // finishSlowForwardCopy + for l > 0 { + if end := a + 8; highWaterMark < end { + highWaterMark = end + } + a += 8 + l -= 8 + } + + dWant := base + length + overrun := highWaterMark - dWant + if d != dWant || overrun < 0 || 10 < overrun { + t.Errorf("length=%d, offset=%d: d and overrun: got (%d, %d), want (%d, something in [0, 10])", + length, offset, d, overrun, dWant) + } + } + } +} + +// TestEncodeNoiseThenRepeats encodes input for which the first half is very +// incompressible and the second half is very compressible. The encoded form's +// length should be closer to 50% of the original length than 100%. +func TestEncodeNoiseThenRepeats(t *testing.T) { + for _, origLen := range []int{256 * 1024, 2048 * 1024} { + src := make([]byte, origLen) + rng := rand.New(rand.NewSource(1)) + firstHalf, secondHalf := src[:origLen/2], src[origLen/2:] + for i := range firstHalf { + firstHalf[i] = uint8(rng.Intn(256)) + } + for i := range secondHalf { + secondHalf[i] = uint8(i >> 8) + } + dst := Encode(nil, src) + if got, want := len(dst), origLen*3/4; got >= want { + t.Errorf("origLen=%d: got %d encoded bytes, want less than %d", origLen, got, want) + } + } +} + +func TestFramingFormat(t *testing.T) { + // src is comprised of alternating 1e5-sized sequences of random + // (incompressible) bytes and repeated (compressible) bytes. 1e5 was chosen + // because it is larger than maxBlockSize (64k). + src := make([]byte, 1e6) + rng := rand.New(rand.NewSource(1)) + for i := 0; i < 10; i++ { + if i%2 == 0 { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(rng.Intn(256)) + } + } else { + for j := 0; j < 1e5; j++ { + src[1e5*i+j] = uint8(i) + } + } + } + + buf := new(bytes.Buffer) + if _, err := NewWriter(buf).Write(src); err != nil { + t.Fatalf("Write: encoding: %v", err) + } + dst, err := ioutil.ReadAll(NewReader(buf)) + if err != nil { + t.Fatalf("ReadAll: decoding: %v", err) + } + if err := cmp(dst, src); err != nil { + t.Fatal(err) + } +} + +func TestWriterGoldenOutput(t *testing.T) { + buf := new(bytes.Buffer) + w := NewBufferedWriter(buf) + defer w.Close() + w.Write([]byte("abcd")) // Not compressible. + w.Flush() + w.Write(bytes.Repeat([]byte{'A'}, 150)) // Compressible. + w.Flush() + // The next chunk is also compressible, but a naive, greedy encoding of the + // overall length 67 copy as a length 64 copy (the longest expressible as a + // tagCopy1 or tagCopy2) plus a length 3 remainder would be two 3-byte + // tagCopy2 tags (6 bytes), since the minimum length for a tagCopy1 is 4 + // bytes. Instead, we could do it shorter, in 5 bytes: a 3-byte tagCopy2 + // (of length 60) and a 2-byte tagCopy1 (of length 7). + w.Write(bytes.Repeat([]byte{'B'}, 68)) + w.Write([]byte("efC")) // Not compressible. + w.Write(bytes.Repeat([]byte{'C'}, 20)) // Compressible. + w.Write(bytes.Repeat([]byte{'B'}, 20)) // Compressible. + w.Write([]byte("g")) // Not compressible. + w.Flush() + + got := buf.String() + want := strings.Join([]string{ + magicChunk, + "\x01\x08\x00\x00", // Uncompressed chunk, 8 bytes long (including 4 byte checksum). + "\x68\x10\xe6\xb6", // Checksum. 
+ "\x61\x62\x63\x64", // Uncompressed payload: "abcd". + "\x00\x11\x00\x00", // Compressed chunk, 17 bytes long (including 4 byte checksum). + "\x5f\xeb\xf2\x10", // Checksum. + "\x96\x01", // Compressed payload: Uncompressed length (varint encoded): 150. + "\x00\x41", // Compressed payload: tagLiteral, length=1, "A". + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\xfe\x01\x00", // Compressed payload: tagCopy2, length=64, offset=1. + "\x52\x01\x00", // Compressed payload: tagCopy2, length=21, offset=1. + "\x00\x18\x00\x00", // Compressed chunk, 24 bytes long (including 4 byte checksum). + "\x30\x85\x69\xeb", // Checksum. + "\x70", // Compressed payload: Uncompressed length (varint encoded): 112. + "\x00\x42", // Compressed payload: tagLiteral, length=1, "B". + "\xee\x01\x00", // Compressed payload: tagCopy2, length=60, offset=1. + "\x0d\x01", // Compressed payload: tagCopy1, length=7, offset=1. + "\x08\x65\x66\x43", // Compressed payload: tagLiteral, length=3, "efC". + "\x4e\x01\x00", // Compressed payload: tagCopy2, length=20, offset=1. + "\x4e\x5a\x00", // Compressed payload: tagCopy2, length=20, offset=90. + "\x00\x67", // Compressed payload: tagLiteral, length=1, "g". + }, "") + if got != want { + t.Fatalf("\ngot: % x\nwant: % x", got, want) + } +} + +func TestEmitLiteral(t *testing.T) { + testCases := []struct { + length int + want string + }{ + {1, "\x00"}, + {2, "\x04"}, + {59, "\xe8"}, + {60, "\xec"}, + {61, "\xf0\x3c"}, + {62, "\xf0\x3d"}, + {254, "\xf0\xfd"}, + {255, "\xf0\xfe"}, + {256, "\xf0\xff"}, + {257, "\xf4\x00\x01"}, + {65534, "\xf4\xfd\xff"}, + {65535, "\xf4\xfe\xff"}, + {65536, "\xf4\xff\xff"}, + } + + dst := make([]byte, 70000) + nines := bytes.Repeat([]byte{0x99}, 65536) + for _, tc := range testCases { + lit := nines[:tc.length] + n := emitLiteral(dst, lit) + if !bytes.HasSuffix(dst[:n], lit) { + t.Errorf("length=%d: did not end with that many literal bytes", tc.length) + continue + } + got := string(dst[:n-tc.length]) + if got != tc.want { + t.Errorf("length=%d:\ngot % x\nwant % x", tc.length, got, tc.want) + continue + } + } +} + +func TestEmitCopy(t *testing.T) { + testCases := []struct { + offset int + length int + want string + }{ + {8, 04, "\x01\x08"}, + {8, 11, "\x1d\x08"}, + {8, 12, "\x2e\x08\x00"}, + {8, 13, "\x32\x08\x00"}, + {8, 59, "\xea\x08\x00"}, + {8, 60, "\xee\x08\x00"}, + {8, 61, "\xf2\x08\x00"}, + {8, 62, "\xf6\x08\x00"}, + {8, 63, "\xfa\x08\x00"}, + {8, 64, "\xfe\x08\x00"}, + {8, 65, "\xee\x08\x00\x05\x08"}, + {8, 66, "\xee\x08\x00\x09\x08"}, + {8, 67, "\xee\x08\x00\x0d\x08"}, + {8, 68, "\xfe\x08\x00\x01\x08"}, + {8, 69, "\xfe\x08\x00\x05\x08"}, + {8, 80, "\xfe\x08\x00\x3e\x08\x00"}, + + {256, 04, "\x21\x00"}, + {256, 11, "\x3d\x00"}, + {256, 12, "\x2e\x00\x01"}, + {256, 13, "\x32\x00\x01"}, + {256, 59, "\xea\x00\x01"}, + {256, 60, "\xee\x00\x01"}, + {256, 61, "\xf2\x00\x01"}, + {256, 62, "\xf6\x00\x01"}, + {256, 63, "\xfa\x00\x01"}, + {256, 64, "\xfe\x00\x01"}, + {256, 65, "\xee\x00\x01\x25\x00"}, + {256, 66, "\xee\x00\x01\x29\x00"}, + {256, 67, "\xee\x00\x01\x2d\x00"}, + {256, 68, "\xfe\x00\x01\x21\x00"}, + {256, 69, "\xfe\x00\x01\x25\x00"}, + {256, 80, "\xfe\x00\x01\x3e\x00\x01"}, + + {2048, 04, "\x0e\x00\x08"}, + {2048, 11, "\x2a\x00\x08"}, + {2048, 12, "\x2e\x00\x08"}, + {2048, 13, "\x32\x00\x08"}, + {2048, 59, "\xea\x00\x08"}, + {2048, 60, "\xee\x00\x08"}, + {2048, 61, "\xf2\x00\x08"}, + {2048, 62, "\xf6\x00\x08"}, + {2048, 63, "\xfa\x00\x08"}, + {2048, 64, "\xfe\x00\x08"}, + {2048, 65, "\xee\x00\x08\x12\x00\x08"}, 
+ {2048, 66, "\xee\x00\x08\x16\x00\x08"}, + {2048, 67, "\xee\x00\x08\x1a\x00\x08"}, + {2048, 68, "\xfe\x00\x08\x0e\x00\x08"}, + {2048, 69, "\xfe\x00\x08\x12\x00\x08"}, + {2048, 80, "\xfe\x00\x08\x3e\x00\x08"}, + } + + dst := make([]byte, 1024) + for _, tc := range testCases { + n := emitCopy(dst, tc.offset, tc.length) + got := string(dst[:n]) + if got != tc.want { + t.Errorf("offset=%d, length=%d:\ngot % x\nwant % x", tc.offset, tc.length, got, tc.want) + } + } +} + +func TestNewBufferedWriter(t *testing.T) { + // Test all 32 possible sub-sequences of these 5 input slices. + // + // Their lengths sum to 400,000, which is over 6 times the Writer ibuf + // capacity: 6 * maxBlockSize is 393,216. + inputs := [][]byte{ + bytes.Repeat([]byte{'a'}, 40000), + bytes.Repeat([]byte{'b'}, 150000), + bytes.Repeat([]byte{'c'}, 60000), + bytes.Repeat([]byte{'d'}, 120000), + bytes.Repeat([]byte{'e'}, 30000), + } +loop: + for i := 0; i < 1< 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. + data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e1(b *testing.B) { benchWords(b, 1e1, true) } +func BenchmarkWordsDecode1e2(b *testing.B) { benchWords(b, 1e2, true) } +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e1(b *testing.B) { benchWords(b, 1e1, false) } +func BenchmarkWordsEncode1e2(b *testing.B) { benchWords(b, 1e2, false) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +func BenchmarkRandomEncode(b *testing.B) { + rng := rand.New(rand.NewSource(1)) + data := make([]byte, 1<<20) + for i := range data { + data[i] = uint8(rng.Intn(256)) + } + benchEncode(b, data) +} + +// testFiles' values are copied directly from +// https://raw.githubusercontent.com/google/snappy/master/snappy_unittest.cc +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string + sizeLimit int +}{ + {"html", "html", 0}, + {"urls", "urls.10K", 0}, + {"jpg", "fireworks.jpeg", 0}, + {"jpg_200", "fireworks.jpeg", 200}, + {"pdf", "paper-100k.pdf", 0}, + {"html4", "html_x_4", 0}, + {"txt1", "alice29.txt", 0}, + {"txt2", "asyoulik.txt", 0}, + {"txt3", "lcet10.txt", 0}, + {"txt4", "plrabn12.txt", 0}, + {"pb", "geo.protodata", 0}, + {"gaviota", "kppkn.gtb", 0}, +} + +const ( + // The benchmark data files are at this canonical URL. 
+ benchURL = "https://raw.githubusercontent.com/google/snappy/master/testdata/" +) + +func downloadBenchmarkFiles(b testing.TB, basename string) (errRet error) { + bDir := filepath.FromSlash(*benchdataDir) + filename := filepath.Join(bDir, basename) + if stat, err := os.Stat(filename); err == nil && stat.Size() != 0 { + return nil + } + + if !*download { + b.Skipf("test data not found; skipping %s without the -download flag", testOrBenchmark(b)) + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.MkdirAll(bDir, 0777); err != nil && !os.IsExist(err) { + return fmt.Errorf("failed to create %s: %s", bDir, err) + } + + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + url := benchURL + basename + resp, err := http.Get(url) + if err != nil { + return fmt.Errorf("failed to download %s: %s", url, err) + } + defer resp.Body.Close() + if s := resp.StatusCode; s != http.StatusOK { + return fmt.Errorf("downloading %s: HTTP status code %d (%s)", url, s, http.StatusText(s)) + } + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to download %s to %s: %s", url, filename, err) + } + return nil +} + +func benchFile(b *testing.B, i int, decode bool) { + if err := downloadBenchmarkFiles(b, testFiles[i].filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + bDir := filepath.FromSlash(*benchdataDir) + data := readFile(b, filepath.Join(bDir, testFiles[i].filename)) + if n := testFiles[i].sizeLimit; 0 < n && n < len(data) { + data = data[:n] + } + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) } +func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) } +func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) } +func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) } +func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) } +func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) } +func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) } +func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) } +func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) } +func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) } +func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) } +func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) } +func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) } +func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) } +func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) } +func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) } +func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) } +func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) } +func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) } +func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) } +func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) } +func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) } +func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) } +func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) } + +func BenchmarkExtendMatch(b *testing.B) { + tDir := filepath.FromSlash(*testdataDir) + src, err := ioutil.ReadFile(filepath.Join(tDir, goldenText)) + if err != nil { + b.Fatalf("ReadFile: %v", err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + for _, tc := range extendMatchGoldenTestCases { + extendMatch(src, tc.i, tc.j) + } + } +} diff --git a/vendor/github.com/google/go-querystring/.gitignore b/vendor/github.com/google/go-querystring/.gitignore new file mode 100644 index 0000000000..9ed3b07cef --- /dev/null +++ b/vendor/github.com/google/go-querystring/.gitignore @@ -0,0 +1 @@ +*.test diff --git a/vendor/github.com/google/go-querystring/CONTRIBUTING.md b/vendor/github.com/google/go-querystring/CONTRIBUTING.md new file mode 100644 index 0000000000..51cf5cd1ad --- /dev/null +++ b/vendor/github.com/google/go-querystring/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**; it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + + +## Submitting a patch ## + + 1.
It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and set up a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Go makes it very simple to ensure properly formatted code, so always run + `go fmt` on your code before committing it. You should also run + [golint][] over your code. As noted in the [golint readme][], it's not + strictly necessary that your code be completely "lint-free", but this will + help you find common style issues. + + 1. Any significant changes should almost always be accompanied by tests. The + project already has good test coverage, so look at some of the existing + tests if you're unsure how to go about it. [gocov][] and [gocov-html][] + are invaluable tools for seeing which parts of your code aren't being + exercised by your tests. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. + +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/go-querystring/README.md b/vendor/github.com/google/go-querystring/README.md new file mode 100644 index 0000000000..03e937034d --- /dev/null +++ b/vendor/github.com/google/go-querystring/README.md @@ -0,0 +1,39 @@ +# go-querystring # + +go-querystring is a Go library for encoding structs into URL query parameters. + + +**Documentation:** http://godoc.org/github.com/google/go-querystring/query +**Build Status:** [![Build Status](https://drone.io/github.com/google/go-querystring/status.png)](https://drone.io/github.com/google/go-querystring/latest) + +## Usage ## + +```go +import "github.com/google/go-querystring/query" +``` + +go-querystring is designed to assist in scenarios where you want to construct a +URL using a struct that represents the URL query parameters. You might do this +to enforce the type safety of your parameters, for example, as is done in the +[go-github][] library. + +The query package exports a single `Values()` function. A simple example: + +```go +type Options struct { + Query string `url:"q"` + ShowAll bool `url:"all"` + Page int `url:"page"` +} + +opt := Options{ "foo", true, 2 } +v, _ := query.Values(opt) +fmt.Print(v.Encode()) // will output: "q=foo&all=true&page=2" +``` + +[go-github]: https://github.com/google/go-github/commit/994f6f8405f052a117d2d0b500054341048fbb08 + +## License ## + +This library is distributed under the BSD-style license found in the [LICENSE](./LICENSE) +file.
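The encode.go hunk below adds two new slice-encoding options, `semicolon` and `numbered`, alongside the existing `comma`, `space`, and `brackets` delimiters. A minimal sketch of how a caller would exercise them, assuming the patched package is vendored on the import path; the `Options` struct and its field names here are hypothetical, invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/google/go-querystring/query"
)

// Options exercises the slice delimiter tags handled by reflectValue in
// encode.go, including the two options added by this patch.
type Options struct {
	Tags  []string `url:"tags,comma"`      // existing: tags=a,b
	Roles []string `url:"roles,semicolon"` // new: roles=x;y
	IDs   []int    `url:"id,numbered"`     // new: id0=1&id1=2
}

func main() {
	v, err := query.Values(Options{
		Tags:  []string{"a", "b"},
		Roles: []string{"x", "y"},
		IDs:   []int{1, 2},
	})
	if err != nil {
		panic(err)
	}
	// url.Values.Encode sorts keys and percent-escapes the delimiters:
	// id0=1&id1=2&roles=x%3By&tags=a%2Cb
	fmt.Println(v.Encode())
}
```

As the `numbered` branch in `reflectValue` shows (`k = fmt.Sprintf("%s%d", name, i)`), each slice element gets its own key (`id0`, `id1`, ...) rather than a repeated key, which some APIs require.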
diff --git a/vendor/github.com/google/go-querystring/query/encode.go b/vendor/github.com/google/go-querystring/query/encode.go index 3d4a01df37..37080b19b5 100644 --- a/vendor/github.com/google/go-querystring/query/encode.go +++ b/vendor/github.com/google/go-querystring/query/encode.go @@ -88,8 +88,11 @@ type Encoder interface { // same name. Including the "comma" option signals that the field should be // encoded as a single comma-delimited value. Including the "space" option // similarly encodes the value as a single space-delimited string. Including -// the "brackets" option signals that the multiple URL values should have "[]" -// appended to the value name. +// the "semicolon" option will encode the value as a semicolon-delimited string. +// Including the "brackets" option signals that the multiple URL values should +// have "[]" appended to the value name. "numbered" will append a number to +// the end of each incidence of the value name, example: +// name0=value0&name1=value1, etc. // // Anonymous struct fields are usually encoded as if their inner exported // fields were fields in the outer struct, subject to the standard Go @@ -184,6 +187,8 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { del = ',' } else if opts.Contains("space") { del = ' ' + } else if opts.Contains("semicolon") { + del = ';' } else if opts.Contains("brackets") { name = name + "[]" } @@ -202,17 +207,16 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { values.Add(name, s.String()) } else { for i := 0; i < sv.Len(); i++ { - values.Add(name, valueString(sv.Index(i), opts)) + k := name + if opts.Contains("numbered") { + k = fmt.Sprintf("%s%d", name, i) + } + values.Add(k, valueString(sv.Index(i), opts)) } } continue } - if sv.Type() == timeType { - values.Add(name, valueString(sv, opts)) - continue - } - for sv.Kind() == reflect.Ptr { if sv.IsNil() { break @@ -220,6 +224,11 @@ func reflectValue(values url.Values, val reflect.Value, scope string) error { sv = sv.Elem() } + if sv.Type() == timeType { + values.Add(name, valueString(sv, opts)) + continue + } + if sv.Kind() == reflect.Struct { reflectValue(values, sv, name) continue diff --git a/vendor/github.com/google/go-querystring/query/encode_test.go b/vendor/github.com/google/go-querystring/query/encode_test.go new file mode 100644 index 0000000000..0f26a77509 --- /dev/null +++ b/vendor/github.com/google/go-querystring/query/encode_test.go @@ -0,0 +1,328 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package query + +import ( + "fmt" + "net/url" + "reflect" + "testing" + "time" +) + +type Nested struct { + A SubNested `url:"a"` + B *SubNested `url:"b"` + Ptr *SubNested `url:"ptr,omitempty"` +} + +type SubNested struct { + Value string `url:"value"` +} + +func TestValues_types(t *testing.T) { + str := "string" + strPtr := &str + timeVal := time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC) + + tests := []struct { + in interface{} + want url.Values + }{ + { + // basic primitives + struct { + A string + B int + C uint + D float32 + E bool + }{}, + url.Values{ + "A": {""}, + "B": {"0"}, + "C": {"0"}, + "D": {"0"}, + "E": {"false"}, + }, + }, + { + // pointers + struct { + A *string + B *int + C **string + D *time.Time + }{ + A: strPtr, + C: &strPtr, + D: &timeVal, + }, + url.Values{ + "A": {str}, + "B": {""}, + "C": {str}, + "D": {"2000-01-01T12:34:56Z"}, + }, + }, + { + // slices and arrays + struct { + A []string + B []string `url:",comma"` + C []string `url:",space"` + D [2]string + E [2]string `url:",comma"` + F [2]string `url:",space"` + G []*string `url:",space"` + H []bool `url:",int,space"` + I []string `url:",brackets"` + J []string `url:",semicolon"` + K []string `url:",numbered"` + }{ + A: []string{"a", "b"}, + B: []string{"a", "b"}, + C: []string{"a", "b"}, + D: [2]string{"a", "b"}, + E: [2]string{"a", "b"}, + F: [2]string{"a", "b"}, + G: []*string{&str, &str}, + H: []bool{true, false}, + I: []string{"a", "b"}, + J: []string{"a", "b"}, + K: []string{"a", "b"}, + }, + url.Values{ + "A": {"a", "b"}, + "B": {"a,b"}, + "C": {"a b"}, + "D": {"a", "b"}, + "E": {"a,b"}, + "F": {"a b"}, + "G": {"string string"}, + "H": {"1 0"}, + "I[]": {"a", "b"}, + "J": {"a;b"}, + "K0": {"a"}, + "K1": {"b"}, + }, + }, + { + // other types + struct { + A time.Time + B time.Time `url:",unix"` + C bool `url:",int"` + D bool `url:",int"` + }{ + A: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC), + B: time.Date(2000, 1, 1, 12, 34, 56, 0, time.UTC), + C: true, + D: false, + }, + url.Values{ + "A": {"2000-01-01T12:34:56Z"}, + "B": {"946730096"}, + "C": {"1"}, + "D": {"0"}, + }, + }, + { + struct { + Nest Nested `url:"nest"` + }{ + Nested{ + A: SubNested{ + Value: "that", + }, + }, + }, + url.Values{ + "nest[a][value]": {"that"}, + "nest[b]": {""}, + }, + }, + { + struct { + Nest Nested `url:"nest"` + }{ + Nested{ + Ptr: &SubNested{ + Value: "that", + }, + }, + }, + url.Values{ + "nest[a][value]": {""}, + "nest[b]": {""}, + "nest[ptr][value]": {"that"}, + }, + }, + { + nil, + url.Values{}, + }, + } + + for i, tt := range tests { + v, err := Values(tt.in) + if err != nil { + t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err) + } + + if !reflect.DeepEqual(tt.want, v) { + t.Errorf("%d. 
Values(%q) returned %v, want %v", i, tt.in, v, tt.want) + } + } +} + +func TestValues_omitEmpty(t *testing.T) { + str := "" + s := struct { + a string + A string + B string `url:",omitempty"` + C string `url:"-"` + D string `url:"omitempty"` // actually named omitempty, not an option + E *string `url:",omitempty"` + }{E: &str} + + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{ + "A": {""}, + "omitempty": {""}, + "E": {""}, // E is included because the pointer is not empty, even though the string being pointed to is + } + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +type A struct { + B +} + +type B struct { + C string +} + +type D struct { + B + C string +} + +type e struct { + B + C string +} + +type F struct { + e +} + +func TestValues_embeddedStructs(t *testing.T) { + tests := []struct { + in interface{} + want url.Values + }{ + { + A{B{C: "foo"}}, + url.Values{"C": {"foo"}}, + }, + { + D{B: B{C: "bar"}, C: "foo"}, + url.Values{"C": {"foo", "bar"}}, + }, + { + F{e{B: B{C: "bar"}, C: "foo"}}, // With unexported embed + url.Values{"C": {"foo", "bar"}}, + }, + } + + for i, tt := range tests { + v, err := Values(tt.in) + if err != nil { + t.Errorf("%d. Values(%q) returned error: %v", i, tt.in, err) + } + + if !reflect.DeepEqual(tt.want, v) { + t.Errorf("%d. Values(%q) returned %v, want %v", i, tt.in, v, tt.want) + } + } +} + +func TestValues_invalidInput(t *testing.T) { + _, err := Values("") + if err == nil { + t.Errorf("expected Values() to return an error on invalid input") + } +} + +type EncodedArgs []string + +func (m EncodedArgs) EncodeValues(key string, v *url.Values) error { + for i, arg := range m { + v.Set(fmt.Sprintf("%s.%d", key, i), arg) + } + return nil +} + +func TestValues_Marshaler(t *testing.T) { + s := struct { + Args EncodedArgs `url:"arg"` + }{[]string{"a", "b", "c"}} + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{ + "arg.0": {"a"}, + "arg.1": {"b"}, + "arg.2": {"c"}, + } + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +func TestValues_MarshalerWithNilPointer(t *testing.T) { + s := struct { + Args *EncodedArgs `url:"arg"` + }{} + v, err := Values(s) + if err != nil { + t.Errorf("Values(%q) returned error: %v", s, err) + } + + want := url.Values{} + if !reflect.DeepEqual(want, v) { + t.Errorf("Values(%q) returned %v, want %v", s, v, want) + } +} + +func TestTagParsing(t *testing.T) { + name, opts := parseTag("field,foobar,foo") + if name != "field" { + t.Fatalf("name = %q, want field", name) + } + for _, tt := range []struct { + opt string + want bool + }{ + {"foobar", true}, + {"foo", true}, + {"bar", false}, + {"field", false}, + } { + if opts.Contains(tt.opt) != tt.want { + t.Errorf("Contains(%q) = %v", tt.opt, !tt.want) + } + } +} diff --git a/vendor/github.com/gopherjs/gopherjs/.gitignore b/vendor/github.com/gopherjs/gopherjs/.gitignore new file mode 100644 index 0000000000..e087097600 --- /dev/null +++ b/vendor/github.com/gopherjs/gopherjs/.gitignore @@ -0,0 +1,2 @@ +/node-syscall/build +/node_modules diff --git a/vendor/github.com/gopherjs/gopherjs/README.md b/vendor/github.com/gopherjs/gopherjs/README.md new file mode 100644 index 0000000000..d5dc6bddba --- /dev/null +++ b/vendor/github.com/gopherjs/gopherjs/README.md @@ -0,0 +1,120 @@ +GopherJS - A compiler from Go to JavaScript 
+-------------------------------------------
+
+[![Circle CI](https://circleci.com/gh/gopherjs/gopherjs.svg?style=svg)](https://circleci.com/gh/gopherjs/gopherjs)
+
+GopherJS compiles Go code ([golang.org](http://golang.org/)) to pure JavaScript code. Its main purpose is to give you the opportunity to write front-end code in Go which will still run in all browsers. Give GopherJS a try on the [GopherJS Playground](http://gopherjs.github.io/playground/).
+
+### What is supported?
+Nearly everything, including Goroutines ([compatibility table](https://github.com/gopherjs/gopherjs/blob/master/doc/packages.md)). Performance is quite good in most cases; see the [HTML5 game engine benchmark](http://ajhager.github.io/enj/).
+
+### Installation and Usage
+Get or update GopherJS and dependencies with:
+
+```
+go get -u github.com/gopherjs/gopherjs
+```
+
+Now you can use `gopherjs build [package]`, `gopherjs build [files]` or `gopherjs install [package]`, which behave similarly to the `go` tool. For `main` packages, these commands create a `.js` file and `.js.map` source map in the current directory or in `$GOPATH/bin`. The generated JavaScript file can be used as usual in a website. Use `gopherjs help [command]` to get a list of possible command line flags, e.g. for minification and automatically watching for changes.
+
+If you want to use `gopherjs run` or `gopherjs test` to run the generated code locally, install Node.js 4.x and the module `source-map-support`:
+
+```
+npm install --global source-map-support
+```
+
+For system calls (file system access, etc.), see [this page](https://github.com/gopherjs/gopherjs/blob/master/doc/syscalls.md).
+
+*Note: GopherJS will try to write compiled object files of the core packages to your $GOROOT/pkg directory. If that fails, it will fall back to $GOPATH/pkg.*
+
+### Performance Tips
+
+- Use the `-m` command line flag to generate minified code.
+- Apply gzip compression (http://en.wikipedia.org/wiki/HTTP_compression).
+- Use `int` instead of `(u)int8/16/32/64`.
+- Use `float64` instead of `float32`.
+
+### Community
+- [#gopherjs Channel on Gophers Slack](https://gophers.slack.com/messages/gopherjs/) (invites to Gophers Slack are available [here](http://blog.gopheracademy.com/gophers-slack-community/#how-can-i-be-invited-to-join:2facdc921b2310f18cb851c36fa92369))
+- [Google Group](https://groups.google.com/d/forum/gopherjs)
+- [Bindings to JavaScript APIs and libraries](https://github.com/gopherjs/gopherjs/wiki/bindings)
+- [GopherJS on Twitter](https://twitter.com/GopherJS)
+
+### Getting started
+#### Interacting with the DOM
+The package `github.com/gopherjs/gopherjs/js` (see [documentation](http://godoc.org/github.com/gopherjs/gopherjs/js)) provides functions for interacting with native JavaScript APIs. For example, the line
+
+```js
+document.write("Hello world!");
+```
+
+would look like this in Go:
+
+```go
+js.Global.Get("document").Call("write", "Hello world!")
+```
+
+You may also want to use the [DOM bindings](http://dominik.honnef.co/go/js/dom), the [jQuery bindings](https://github.com/gopherjs/jquery) (see [TodoMVC Example](https://github.com/gopherjs/todomvc)) or the [AngularJS bindings](https://github.com/gopherjs/go-angularjs). Those are some of the [bindings to JavaScript APIs and libraries](https://github.com/gopherjs/gopherjs/wiki/bindings) by community members.
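+
+For a slightly fuller illustrative sketch combining `Get`, `Set` and `Call` on native objects (the `#out` element id below is hypothetical):
+
+```go
+package main
+
+import "github.com/gopherjs/gopherjs/js"
+
+func main() {
+	doc := js.Global.Get("document")
+
+	// Property access: read document.title, then overwrite it.
+	title := doc.Get("title").String()
+	doc.Set("title", title+" (via GopherJS)")
+
+	// Method call with an argument: document.querySelector("#out").
+	out := doc.Call("querySelector", "#out")
+
+	// querySelector returns null when nothing matches; GopherJS
+	// internalizes JavaScript null as a nil *js.Object.
+	if out != nil {
+		out.Set("textContent", "Hello from Go")
+	}
+}
+```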
+ +#### Providing library functions for use in other JavaScript code +Set a global variable to a map that contains the functions: + +```go +package main + +import "github.com/gopherjs/gopherjs/js" + +func main() { + js.Global.Set("pet", map[string]interface{}{ + "New": New, + }) +} + +type Pet struct { + name string +} + +func New(name string) *js.Object { + return js.MakeWrapper(&Pet{name}) +} + +func (p *Pet) Name() string { + return p.name +} + +func (p *Pet) SetName(name string) { + p.name = name +} +``` + +For more details see [Jason Stone's blog post](http://legacytotheedge.blogspot.de/2014/03/gopherjs-go-to-javascript-transpiler.html) about GopherJS. + +### Architecture + +#### General +GopherJS emulates a 32-bit environment. This means that `int`, `uint` and `uintptr` have a precision of 32 bits. However, the explicit 64-bit integer types `int64` and `uint64` are supported. The `GOARCH` value of GopherJS is "js". You may use it as a build constraint: `// +build js`. + +#### Application Lifecycle + +The `main` function is executed as usual after all `init` functions have run. JavaScript callbacks can also invoke Go functions, even after the `main` function has exited. Therefore the end of the `main` function should not be regarded as the end of the application and does not end the execution of other goroutines. + +In the browser, calling `os.Exit` (e.g. indirectly by `log.Fatal`) also does not terminate the execution of the program. For convenience, it calls `runtime.Goexit` to immediately terminate the calling goroutine. + +#### Goroutines +Goroutines are fully supported by GopherJS. The only restriction is that you need to start a new goroutine if you want to use blocking code called from external JavaScript: + +```go +js.Global.Get("myButton").Call("addEventListener", "click", func() { + go func() { + [...] + someBlockingFunction() + [...] + }() +}) +``` + +How it works: + +JavaScript has no concept of concurrency (except web workers, but those are too strictly separated to be used for goroutines). Because of that, instructions in JavaScript are never blocking. A blocking call would effectively freeze the responsiveness of your web page, so calls with callback arguments are used instead. + +GopherJS does some heavy lifting to work around this restriction: Whenever an instruction is blocking (e.g. communicating with a channel that isn't ready), the whole stack will unwind (= all functions return) and the goroutine will be put to sleep. Then another goroutine which is ready to resume gets picked and its stack with all local variables will be restored. This is done by preserving each stack frame inside a closure. diff --git a/vendor/github.com/gopherjs/gopherjs/circle.yml b/vendor/github.com/gopherjs/gopherjs/circle.yml new file mode 100644 index 0000000000..120b7a2e98 --- /dev/null +++ b/vendor/github.com/gopherjs/gopherjs/circle.yml @@ -0,0 +1,21 @@ +machine: + node: + version: 5.1.0 + environment: + SOURCE_MAP_SUPPORT: false + +dependencies: + pre: + - cd /usr/local && sudo rm -rf go && curl https://storage.googleapis.com/golang/go1.5.linux-amd64.tar.gz | sudo tar -xz && sudo chmod a+w go/src/path/filepath + post: + - mv ./gopherjs $HOME/bin + - npm install --global node-gyp + - cd node-syscall && node-gyp rebuild && mkdir -p ~/.node_libraries/ && cp build/Release/syscall.node ~/.node_libraries/syscall.node + +test: + override: + - diff -u <(echo -n) <(gofmt -d .) + - go tool vet *.go # Go package in root directory. 
+ - for d in */; do echo $d; done | grep -v tests/ | xargs go tool vet # All subdirectories except "tests". + - gopherjs test --short --minify github.com/gopherjs/gopherjs/tests github.com/gopherjs/gopherjs/tests/main github.com/gopherjs/gopherjs/js archive/tar archive/zip bufio bytes compress/bzip2 compress/flate compress/gzip compress/lzw compress/zlib container/heap container/list container/ring crypto/aes crypto/cipher crypto/des crypto/dsa crypto/ecdsa crypto/elliptic crypto/hmac crypto/md5 crypto/rand crypto/rc4 crypto/rsa crypto/sha1 crypto/sha256 crypto/sha512 crypto/subtle crypto/x509 database/sql database/sql/driver debug/dwarf debug/elf debug/gosym debug/macho debug/pe encoding/ascii85 encoding/asn1 encoding/base32 encoding/base64 encoding/binary encoding/csv encoding/gob encoding/hex encoding/json encoding/pem encoding/xml errors expvar flag fmt go/ast go/constant go/doc go/format go/parser go/printer go/scanner go/token hash/adler32 hash/crc32 hash/crc64 hash/fnv html html/template image image/color image/draw image/gif image/jpeg image/png index/suffixarray io io/ioutil math math/big math/cmplx math/rand mime mime/multipart mime/quotedprintable net/http/cookiejar net/http/fcgi net/mail net/rpc/jsonrpc net/textproto net/url path path/filepath reflect regexp regexp/syntax sort strconv strings sync sync/atomic testing/quick text/scanner text/tabwriter text/template text/template/parse time unicode unicode/utf16 unicode/utf8 + - go test -v -race ./... diff --git a/vendor/github.com/gopherjs/gopherjs/js/js_test.go b/vendor/github.com/gopherjs/gopherjs/js/js_test.go new file mode 100644 index 0000000000..c9797fb0fb --- /dev/null +++ b/vendor/github.com/gopherjs/gopherjs/js/js_test.go @@ -0,0 +1,553 @@ +// +build js + +package js_test + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/gopherjs/gopherjs/js" +) + +var dummys = js.Global.Call("eval", `({ + someBool: true, + someString: "abc\u1234", + someInt: 42, + someFloat: 42.123, + someArray: [41, 42, 43], + add: function(a, b) { + return a + b; + }, + mapArray: function(array, f) { + var newArray = new Array(array.length); + for (var i = 0; i < array.length; i++) { + newArray[i] = f(array[i]); + } + return newArray; + }, + toUnixTimestamp: function(d) { + return d.getTime() / 1000; + }, + testField: function(o) { + return o.Field; + }, + testMethod: function(o) { + return o.Method(42); + }, + isEqual: function(a, b) { + return a === b; + }, + call: function(f, a) { + f(a); + }, +})`) + +func TestBool(t *testing.T) { + e := true + o := dummys.Get("someBool") + if v := o.Bool(); v != e { + t.Errorf("expected %#v, got %#v", e, v) + } + if i := o.Interface().(bool); i != e { + t.Errorf("expected %#v, got %#v", e, i) + } + if dummys.Set("otherBool", e); dummys.Get("otherBool").Bool() != e { + t.Fail() + } +} + +func TestStr(t *testing.T) { + e := "abc\u1234" + o := dummys.Get("someString") + if v := o.String(); v != e { + t.Errorf("expected %#v, got %#v", e, v) + } + if i := o.Interface().(string); i != e { + t.Errorf("expected %#v, got %#v", e, i) + } + if dummys.Set("otherString", e); dummys.Get("otherString").String() != e { + t.Fail() + } +} + +func TestInt(t *testing.T) { + e := 42 + o := dummys.Get("someInt") + if v := o.Int(); v != e { + t.Errorf("expected %#v, got %#v", e, v) + } + if i := int(o.Interface().(float64)); i != e { + t.Errorf("expected %#v, got %#v", e, i) + } + if dummys.Set("otherInt", e); dummys.Get("otherInt").Int() != e { + t.Fail() + } +} + +func TestFloat(t *testing.T) { + e := 42.123 + o 
:= dummys.Get("someFloat") + if v := o.Float(); v != e { + t.Errorf("expected %#v, got %#v", e, v) + } + if i := o.Interface().(float64); i != e { + t.Errorf("expected %#v, got %#v", e, i) + } + if dummys.Set("otherFloat", e); dummys.Get("otherFloat").Float() != e { + t.Fail() + } +} + +func TestUndefined(t *testing.T) { + if dummys == js.Undefined || dummys.Get("xyz") != js.Undefined { + t.Fail() + } +} + +func TestNull(t *testing.T) { + var null *js.Object + dummys.Set("test", nil) + if null != nil || dummys == nil || dummys.Get("test") != nil { + t.Fail() + } +} + +func TestLength(t *testing.T) { + if dummys.Get("someArray").Length() != 3 { + t.Fail() + } +} + +func TestIndex(t *testing.T) { + if dummys.Get("someArray").Index(1).Int() != 42 { + t.Fail() + } +} + +func TestSetIndex(t *testing.T) { + dummys.Get("someArray").SetIndex(2, 99) + if dummys.Get("someArray").Index(2).Int() != 99 { + t.Fail() + } +} + +func TestCall(t *testing.T) { + var i int64 = 40 + if dummys.Call("add", i, 2).Int() != 42 { + t.Fail() + } + if dummys.Call("add", js.Global.Call("eval", "40"), 2).Int() != 42 { + t.Fail() + } +} + +func TestInvoke(t *testing.T) { + var i int64 = 40 + if dummys.Get("add").Invoke(i, 2).Int() != 42 { + t.Fail() + } +} + +func TestNew(t *testing.T) { + if js.Global.Get("Array").New(42).Length() != 42 { + t.Fail() + } +} + +type StructWithJsField1 struct { + *js.Object + Length int `js:"length"` + Slice func(int, int) []int `js:"slice"` +} + +type StructWithJsField2 struct { + object *js.Object // to hide members from public API + Length int `js:"length"` + Slice func(int, int) []int `js:"slice"` +} + +type Wrapper1 struct { + StructWithJsField1 + WrapperLength int `js:"length"` +} + +type Wrapper2 struct { + innerStruct *StructWithJsField2 + WrapperLength int `js:"length"` +} + +func TestReadingJsField(t *testing.T) { + a := StructWithJsField1{Object: js.Global.Get("Array").New(42)} + b := &StructWithJsField2{object: js.Global.Get("Array").New(42)} + wa := Wrapper1{StructWithJsField1: a} + wb := Wrapper2{innerStruct: b} + if a.Length != 42 || b.Length != 42 || wa.Length != 42 || wa.WrapperLength != 42 || wb.WrapperLength != 42 { + t.Fail() + } +} + +func TestWritingJsField(t *testing.T) { + a := StructWithJsField1{Object: js.Global.Get("Object").New()} + b := &StructWithJsField2{object: js.Global.Get("Object").New()} + a.Length = 42 + b.Length = 42 + if a.Get("length").Int() != 42 || b.object.Get("length").Int() != 42 { + t.Fail() + } +} + +func TestCallingJsField(t *testing.T) { + a := &StructWithJsField1{Object: js.Global.Get("Array").New(100)} + b := &StructWithJsField2{object: js.Global.Get("Array").New(100)} + a.SetIndex(3, 123) + b.object.SetIndex(3, 123) + f := a.Slice + a2 := a.Slice(2, 44) + b2 := b.Slice(2, 44) + c2 := f(2, 44) + if len(a2) != 42 || len(b2) != 42 || len(c2) != 42 || a2[1] != 123 || b2[1] != 123 || c2[1] != 123 { + t.Fail() + } +} + +func TestReflectionOnJsField(t *testing.T) { + a := StructWithJsField1{Object: js.Global.Get("Array").New(42)} + wa := Wrapper1{StructWithJsField1: a} + if reflect.ValueOf(a).FieldByName("Length").Int() != 42 || reflect.ValueOf(&wa).Elem().FieldByName("WrapperLength").Int() != 42 { + t.Fail() + } + reflect.ValueOf(&wa).Elem().FieldByName("WrapperLength").Set(reflect.ValueOf(10)) + if a.Length != 10 { + t.Fail() + } +} + +func TestUnboxing(t *testing.T) { + a := StructWithJsField1{Object: js.Global.Get("Object").New()} + b := &StructWithJsField2{object: js.Global.Get("Object").New()} + if !dummys.Call("isEqual", a, 
a.Object).Bool() || !dummys.Call("isEqual", b, b.object).Bool() { + t.Fail() + } + wa := Wrapper1{StructWithJsField1: a} + wb := Wrapper2{innerStruct: b} + if !dummys.Call("isEqual", wa, a.Object).Bool() || !dummys.Call("isEqual", wb, b.object).Bool() { + t.Fail() + } +} + +func TestBoxing(t *testing.T) { + o := js.Global.Get("Object").New() + dummys.Call("call", func(a StructWithJsField1) { + if a.Object != o { + t.Fail() + } + }, o) + dummys.Call("call", func(a *StructWithJsField2) { + if a.object != o { + t.Fail() + } + }, o) + dummys.Call("call", func(a Wrapper1) { + if a.Object != o { + t.Fail() + } + }, o) + dummys.Call("call", func(a Wrapper2) { + if a.innerStruct.object != o { + t.Fail() + } + }, o) +} + +func TestFunc(t *testing.T) { + a := dummys.Call("mapArray", []int{1, 2, 3}, func(e int64) int64 { return e + 40 }) + b := dummys.Call("mapArray", []int{1, 2, 3}, func(e ...int64) int64 { return e[0] + 40 }) + if a.Index(1).Int() != 42 || b.Index(1).Int() != 42 { + t.Fail() + } + + add := dummys.Get("add").Interface().(func(...interface{}) *js.Object) + var i int64 = 40 + if add(i, 2).Int() != 42 { + t.Fail() + } +} + +func TestDate(t *testing.T) { + d := time.Date(2013, time.August, 27, 22, 25, 11, 0, time.UTC) + if dummys.Call("toUnixTimestamp", d).Int() != int(d.Unix()) { + t.Fail() + } + + d2 := js.Global.Get("Date").New(d.UnixNano() / 1000000).Interface().(time.Time) + if !d2.Equal(d) { + t.Fail() + } +} + +// https://github.com/gopherjs/gopherjs/issues/287 +func TestInternalizeDate(t *testing.T) { + var a = time.Unix(0, (123 * time.Millisecond).Nanoseconds()) + var b time.Time + js.Global.Set("internalizeDate", func(t time.Time) { b = t }) + js.Global.Call("eval", "(internalizeDate(new Date(123)))") + if a != b { + t.Fail() + } +} + +func TestEquality(t *testing.T) { + if js.Global.Get("Array") != js.Global.Get("Array") || js.Global.Get("Array") == js.Global.Get("String") { + t.Fail() + } + type S struct{ *js.Object } + o1 := js.Global.Get("Object").New() + o2 := js.Global.Get("Object").New() + a := S{o1} + b := S{o1} + c := S{o2} + if a != b || a == c { + t.Fail() + } +} + +func TestUndefinedEquality(t *testing.T) { + var ui interface{} = js.Undefined + if ui != js.Undefined { + t.Fail() + } +} + +func TestInterfaceEquality(t *testing.T) { + o := js.Global.Get("Object").New() + var i interface{} = o + if i != o { + t.Fail() + } +} + +func TestUndefinedInternalization(t *testing.T) { + undefinedEqualsJsUndefined := func(i interface{}) bool { + return i == js.Undefined + } + js.Global.Set("undefinedEqualsJsUndefined", undefinedEqualsJsUndefined) + if !js.Global.Call("eval", "(undefinedEqualsJsUndefined(undefined))").Bool() { + t.Fail() + } +} + +func TestSameFuncWrapper(t *testing.T) { + a := func(_ string) {} // string argument to force wrapping + b := func(_ string) {} // string argument to force wrapping + if !dummys.Call("isEqual", a, a).Bool() || dummys.Call("isEqual", a, b).Bool() { + t.Fail() + } + if !dummys.Call("isEqual", somePackageFunction, somePackageFunction).Bool() { + t.Fail() + } + if !dummys.Call("isEqual", (*T).someMethod, (*T).someMethod).Bool() { + t.Fail() + } + t1 := &T{} + t2 := &T{} + if !dummys.Call("isEqual", t1.someMethod, t1.someMethod).Bool() || dummys.Call("isEqual", t1.someMethod, t2.someMethod).Bool() { + t.Fail() + } +} + +func somePackageFunction(_ string) { +} + +type T struct{} + +func (t *T) someMethod() { + println(42) +} + +func TestError(t *testing.T) { + defer func() { + err := recover() + if err == nil { + t.Fail() + } + if _, ok := 
err.(error); !ok { + t.Fail() + } + jsErr, ok := err.(*js.Error) + if !ok || jsErr.Get("stack") == js.Undefined { + t.Fail() + } + }() + js.Global.Get("notExisting").Call("throwsError") +} + +type F struct { + Field int +} + +func TestExternalizeField(t *testing.T) { + if dummys.Call("testField", map[string]int{"Field": 42}).Int() != 42 { + t.Fail() + } + if dummys.Call("testField", F{42}).Int() != 42 { + t.Fail() + } +} + +func TestMakeFunc(t *testing.T) { + o := js.Global.Get("Object").New() + o.Set("f", js.MakeFunc(func(this *js.Object, arguments []*js.Object) interface{} { + if this != o { + t.Fail() + } + if len(arguments) != 2 || arguments[0].Int() != 1 || arguments[1].Int() != 2 { + t.Fail() + } + return 3 + })) + if o.Call("f", 1, 2).Int() != 3 { + t.Fail() + } +} + +type M struct { + f int +} + +func (m *M) Method(a interface{}) map[string]string { + if a.(map[string]interface{})["x"].(float64) != 1 || m.f != 42 { + return nil + } + return map[string]string{ + "y": "z", + } +} + +func TestMakeWrapper(t *testing.T) { + m := &M{42} + if !js.Global.Call("eval", `(function(m) { return m.Method({x: 1})["y"] === "z"; })`).Invoke(js.MakeWrapper(m)).Bool() { + t.Fail() + } + + if js.MakeWrapper(m).Interface() != m { + t.Fail() + } + + f := func(m *M) { + if m.f != 42 { + t.Fail() + } + } + js.Global.Call("eval", `(function(f, m) { f(m); })`).Invoke(f, js.MakeWrapper(m)) +} + +func TestCallWithNull(t *testing.T) { + c := make(chan int, 1) + js.Global.Set("test", func() { + c <- 42 + }) + js.Global.Get("test").Call("call", nil) + if <-c != 42 { + t.Fail() + } +} + +func TestReflection(t *testing.T) { + o := js.Global.Call("eval", "({ answer: 42 })") + if reflect.ValueOf(o).Interface().(*js.Object) != o { + t.Fail() + } + + type S struct { + Field *js.Object + } + s := S{o} + + v := reflect.ValueOf(&s).Elem() + if v.Field(0).Interface().(*js.Object).Get("answer").Int() != 42 { + t.Fail() + } + if v.Field(0).MethodByName("Get").Call([]reflect.Value{reflect.ValueOf("answer")})[0].Interface().(*js.Object).Int() != 42 { + t.Fail() + } + v.Field(0).Set(reflect.ValueOf(js.Global.Call("eval", "({ answer: 100 })"))) + if s.Field.Get("answer").Int() != 100 { + t.Fail() + } + + if fmt.Sprintf("%+v", s) != "{Field:[object Object]}" { + t.Fail() + } +} + +func TestNil(t *testing.T) { + type S struct{ X int } + var s *S + if !dummys.Call("isEqual", s, nil).Bool() { + t.Fail() + } + + type T struct{ Field *S } + if dummys.Call("testField", T{}) != nil { + t.Fail() + } +} + +func TestNewArrayBuffer(t *testing.T) { + b := []byte("abcd") + a := js.NewArrayBuffer(b[1:3]) + if a.Get("byteLength").Int() != 2 { + t.Fail() + } +} + +func TestInternalizeExternalizeNull(t *testing.T) { + type S struct { + *js.Object + } + r := js.Global.Call("eval", "(function(f) { return f(null); })").Invoke(func(s S) S { + if s.Object != nil { + t.Fail() + } + return s + }) + if r != nil { + t.Fail() + } +} + +func TestInternalizeExternalizeUndefined(t *testing.T) { + type S struct { + *js.Object + } + r := js.Global.Call("eval", "(function(f) { return f(undefined); })").Invoke(func(s S) S { + if s.Object != js.Undefined { + t.Fail() + } + return s + }) + if r != js.Undefined { + t.Fail() + } +} + +func TestDereference(t *testing.T) { + s := *dummys + p := &s + if p != dummys { + t.Fail() + } +} + +func TestSurrogatePairs(t *testing.T) { + js.Global.Set("str", "\U0001F600") + str := js.Global.Get("str") + if str.Get("length").Int() != 2 || str.Call("charCodeAt", 0).Int() != 55357 || str.Call("charCodeAt", 1).Int() != 56832 { + 
t.Fail() + } + if str.String() != "\U0001F600" { + t.Fail() + } +} diff --git a/vendor/github.com/gopherjs/gopherjs/tool.go b/vendor/github.com/gopherjs/gopherjs/tool.go new file mode 100644 index 0000000000..91f796ba82 --- /dev/null +++ b/vendor/github.com/gopherjs/gopherjs/tool.go @@ -0,0 +1,791 @@ +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/scanner" + "go/token" + "go/types" + "io" + "io/ioutil" + "net" + "net/http" + "os" + "os/exec" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "text/template" + "time" + + gbuild "github.com/gopherjs/gopherjs/build" + "github.com/gopherjs/gopherjs/compiler" + "github.com/neelance/sourcemap" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "golang.org/x/crypto/ssh/terminal" +) + +var currentDirectory string + +func init() { + var err error + currentDirectory, err = os.Getwd() + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + currentDirectory, err = filepath.EvalSymlinks(currentDirectory) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + gopaths := filepath.SplitList(build.Default.GOPATH) + if len(gopaths) == 0 { + fmt.Fprintf(os.Stderr, "$GOPATH not set. For more details see: go help gopath\n") + os.Exit(1) + } +} + +func main() { + options := &gbuild.Options{CreateMapFile: true} + var pkgObj string + + pflag.BoolVarP(&options.Verbose, "verbose", "v", false, "print the names of packages as they are compiled") + flagVerbose := pflag.Lookup("verbose") + pflag.BoolVarP(&options.Quiet, "quiet", "q", false, "suppress non-fatal warnings") + flagQuiet := pflag.Lookup("quiet") + pflag.BoolVarP(&options.Watch, "watch", "w", false, "watch for changes to the source files") + flagWatch := pflag.Lookup("watch") + pflag.BoolVarP(&options.Minify, "minify", "m", false, "minify generated code") + flagMinify := pflag.Lookup("minify") + pflag.BoolVar(&options.Color, "color", terminal.IsTerminal(int(os.Stderr.Fd())) && os.Getenv("TERM") != "dumb", "colored output") + flagColor := pflag.Lookup("color") + tags := pflag.String("tags", "", "a list of build tags to consider satisfied during the build") + flagTags := pflag.Lookup("tags") + + cmdBuild := &cobra.Command{ + Use: "build [packages]", + Short: "compile packages and dependencies", + } + cmdBuild.Flags().StringVarP(&pkgObj, "output", "o", "", "output file") + cmdBuild.Flags().AddFlag(flagVerbose) + cmdBuild.Flags().AddFlag(flagQuiet) + cmdBuild.Flags().AddFlag(flagWatch) + cmdBuild.Flags().AddFlag(flagMinify) + cmdBuild.Flags().AddFlag(flagColor) + cmdBuild.Flags().AddFlag(flagTags) + cmdBuild.Run = func(cmd *cobra.Command, args []string) { + options.BuildTags = strings.Fields(*tags) + for { + s := gbuild.NewSession(options) + + exitCode := handleError(func() error { + if len(args) == 0 { + return s.BuildDir(currentDirectory, currentDirectory, pkgObj) + } + + if strings.HasSuffix(args[0], ".go") || strings.HasSuffix(args[0], ".inc.js") { + for _, arg := range args { + if !strings.HasSuffix(arg, ".go") && !strings.HasSuffix(arg, ".inc.js") { + return fmt.Errorf("named files must be .go or .inc.js files") + } + } + if pkgObj == "" { + basename := filepath.Base(args[0]) + pkgObj = basename[:len(basename)-3] + ".js" + } + names := make([]string, len(args)) + for i, name := range args { + name = filepath.ToSlash(name) + names[i] = name + if s.Watcher != nil { + s.Watcher.Add(name) + } + } + if err := s.BuildFiles(args, pkgObj, currentDirectory); err != nil { + return err + } + return nil + } + + for _, 
pkgPath := range args { + pkgPath = filepath.ToSlash(pkgPath) + if s.Watcher != nil { + s.Watcher.Add(pkgPath) + } + pkg, err := gbuild.Import(pkgPath, 0, s.InstallSuffix(), options.BuildTags) + if err != nil { + return err + } + if err := s.BuildPackage(pkg); err != nil { + return err + } + if pkgObj == "" { + pkgObj = filepath.Base(args[0]) + ".js" + } + if err := s.WriteCommandPackage(pkg, pkgObj); err != nil { + return err + } + } + return nil + }, options, nil) + + if s.Watcher == nil { + os.Exit(exitCode) + } + s.WaitForChange() + } + } + + cmdInstall := &cobra.Command{ + Use: "install [packages]", + Short: "compile and install packages and dependencies", + } + cmdInstall.Flags().AddFlag(flagVerbose) + cmdInstall.Flags().AddFlag(flagQuiet) + cmdInstall.Flags().AddFlag(flagWatch) + cmdInstall.Flags().AddFlag(flagMinify) + cmdInstall.Flags().AddFlag(flagColor) + cmdInstall.Flags().AddFlag(flagTags) + cmdInstall.Run = func(cmd *cobra.Command, args []string) { + options.BuildTags = strings.Fields(*tags) + for { + s := gbuild.NewSession(options) + + exitCode := handleError(func() error { + pkgs := args + if len(pkgs) == 0 { + firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0] // TODO: The GOPATH workspace that contains the package source should be chosen. + srcDir, err := filepath.EvalSymlinks(filepath.Join(firstGopathWorkspace, "src")) + if err != nil { + return err + } + if !strings.HasPrefix(currentDirectory, srcDir) { + return fmt.Errorf("gopherjs install: no install location for directory %s outside GOPATH", currentDirectory) + } + pkgPath, err := filepath.Rel(srcDir, currentDirectory) + if err != nil { + return err + } + pkgs = []string{pkgPath} + } + if cmd.Name() == "get" { + goGet := exec.Command("go", append([]string{"get", "-d", "-tags=js"}, pkgs...)...) + goGet.Stdout = os.Stdout + goGet.Stderr = os.Stderr + if err := goGet.Run(); err != nil { + return err + } + } + for _, pkgPath := range pkgs { + pkgPath = filepath.ToSlash(pkgPath) + if _, err := s.BuildImportPath(pkgPath); err != nil { + return err + } + pkg := s.Packages[pkgPath] + if err := s.WriteCommandPackage(pkg, pkg.PkgObj); err != nil { + return err + } + } + return nil + }, options, nil) + + if s.Watcher == nil { + os.Exit(exitCode) + } + s.WaitForChange() + } + } + + cmdGet := &cobra.Command{ + Use: "get [packages]", + Short: "download and install packages and dependencies", + } + cmdGet.Flags().AddFlag(flagVerbose) + cmdGet.Flags().AddFlag(flagQuiet) + cmdGet.Flags().AddFlag(flagWatch) + cmdGet.Flags().AddFlag(flagMinify) + cmdGet.Flags().AddFlag(flagColor) + cmdGet.Flags().AddFlag(flagTags) + cmdGet.Run = cmdInstall.Run + + cmdRun := &cobra.Command{ + Use: "run [gofiles...] 
[arguments...]", + Short: "compile and run Go program", + } + cmdRun.Run = func(cmd *cobra.Command, args []string) { + os.Exit(handleError(func() error { + lastSourceArg := 0 + for { + if lastSourceArg == len(args) || !(strings.HasSuffix(args[lastSourceArg], ".go") || strings.HasSuffix(args[lastSourceArg], ".inc.js")) { + break + } + lastSourceArg++ + } + if lastSourceArg == 0 { + return fmt.Errorf("gopherjs run: no go files listed") + } + + tempfile, err := ioutil.TempFile(currentDirectory, filepath.Base(args[0])+".") + if err != nil && strings.HasPrefix(currentDirectory, runtime.GOROOT()) { + tempfile, err = ioutil.TempFile("", filepath.Base(args[0])+".") + } + if err != nil { + return err + } + defer func() { + tempfile.Close() + os.Remove(tempfile.Name()) + os.Remove(tempfile.Name() + ".map") + }() + s := gbuild.NewSession(options) + if err := s.BuildFiles(args[:lastSourceArg], tempfile.Name(), currentDirectory); err != nil { + return err + } + if err := runNode(tempfile.Name(), args[lastSourceArg:], "", options.Quiet); err != nil { + return err + } + return nil + }, options, nil)) + } + + cmdTest := &cobra.Command{ + Use: "test [packages]", + Short: "test packages", + } + bench := cmdTest.Flags().String("bench", "", "Run benchmarks matching the regular expression. By default, no benchmarks run. To run all benchmarks, use '--bench=.'.") + run := cmdTest.Flags().String("run", "", "Run only those tests and examples matching the regular expression.") + short := cmdTest.Flags().Bool("short", false, "Tell long-running tests to shorten their run time.") + verbose := cmdTest.Flags().BoolP("verbose", "v", false, "Log all tests as they are run. Also print all text from Log and Logf calls even if the test succeeds.") + compileOnly := cmdTest.Flags().BoolP("compileonly", "c", false, "Compile the test binary to pkg.test.js but do not run it (where pkg is the last element of the package's import path). The file name can be changed with the -o flag.") + outputFilename := cmdTest.Flags().StringP("output", "o", "", "Compile the test binary to the named file. The test still runs (unless -c is specified).") + cmdTest.Flags().AddFlag(flagMinify) + cmdTest.Flags().AddFlag(flagColor) + cmdTest.Run = func(cmd *cobra.Command, args []string) { + os.Exit(handleError(func() error { + pkgs := make([]*gbuild.PackageData, len(args)) + for i, pkgPath := range args { + pkgPath = filepath.ToSlash(pkgPath) + var err error + pkgs[i], err = gbuild.Import(pkgPath, 0, "", nil) + if err != nil { + return err + } + } + if len(pkgs) == 0 { + firstGopathWorkspace := filepath.SplitList(build.Default.GOPATH)[0] + srcDir, err := filepath.EvalSymlinks(filepath.Join(firstGopathWorkspace, "src")) + if err != nil { + return err + } + var pkg *gbuild.PackageData + if strings.HasPrefix(currentDirectory, srcDir) { + pkgPath, err := filepath.Rel(srcDir, currentDirectory) + if err != nil { + return err + } + if pkg, err = gbuild.Import(pkgPath, 0, "", nil); err != nil { + return err + } + } + if pkg == nil { + if pkg, err = gbuild.ImportDir(currentDirectory, 0); err != nil { + return err + } + pkg.ImportPath = "_" + currentDirectory + } + pkgs = []*gbuild.PackageData{pkg} + } + + var exitErr error + for _, pkg := range pkgs { + if len(pkg.TestGoFiles) == 0 && len(pkg.XTestGoFiles) == 0 { + fmt.Printf("? 
\t%s\t[no test files]\n", pkg.ImportPath) + continue + } + + s := gbuild.NewSession(options) + tests := &testFuncs{Package: pkg.Package} + collectTests := func(testPkg *gbuild.PackageData, testPkgName string, needVar *bool) error { + if err := s.BuildPackage(testPkg); err != nil { + return err + } + + for _, decl := range testPkg.Archive.Declarations { + if strings.HasPrefix(decl.FullName, testPkg.ImportPath+".Test") { + tests.Tests = append(tests.Tests, testFunc{Package: testPkgName, Name: decl.FullName[len(testPkg.ImportPath)+1:]}) + *needVar = true + } + if strings.HasPrefix(decl.FullName, testPkg.ImportPath+".Benchmark") { + tests.Benchmarks = append(tests.Benchmarks, testFunc{Package: testPkgName, Name: decl.FullName[len(testPkg.ImportPath)+1:]}) + *needVar = true + } + } + return nil + } + + if err := collectTests(&gbuild.PackageData{ + Package: &build.Package{ + ImportPath: pkg.ImportPath, + Dir: pkg.Dir, + GoFiles: append(pkg.GoFiles, pkg.TestGoFiles...), + Imports: append(pkg.Imports, pkg.TestImports...), + }, + IsTest: true, + JSFiles: pkg.JSFiles, + }, "_test", &tests.NeedTest); err != nil { + return err + } + + if err := collectTests(&gbuild.PackageData{ + Package: &build.Package{ + ImportPath: pkg.ImportPath + "_test", + Dir: pkg.Dir, + GoFiles: pkg.XTestGoFiles, + Imports: pkg.XTestImports, + }, + IsTest: true, + }, "_xtest", &tests.NeedXtest); err != nil { + return err + } + + buf := bytes.NewBuffer(nil) + if err := testmainTmpl.Execute(buf, tests); err != nil { + return err + } + + fset := token.NewFileSet() + mainFile, err := parser.ParseFile(fset, "_testmain.go", buf, 0) + if err != nil { + return err + } + + mainPkg := &gbuild.PackageData{ + Package: &build.Package{ + Name: "main", + ImportPath: "main", + }, + } + mainPkg.Archive, err = compiler.Compile("main", []*ast.File{mainFile}, fset, s.ImportContext, options.Minify) + if err != nil { + return err + } + + if *compileOnly && *outputFilename == "" { + *outputFilename = pkg.Package.Name + "_test.js" + } + + var outfile *os.File + if *outputFilename != "" { + outfile, err = os.Create(*outputFilename) + if err != nil { + return err + } + } else { + outfile, err = ioutil.TempFile(currentDirectory, "test.") + if err != nil { + return err + } + } + defer func() { + outfile.Close() + if *outputFilename == "" { + os.Remove(outfile.Name()) + os.Remove(outfile.Name() + ".map") + } + }() + + if err := s.WriteCommandPackage(mainPkg, outfile.Name()); err != nil { + return err + } + + if *compileOnly { + continue + } + + var args []string + if *bench != "" { + args = append(args, "-test.bench", *bench) + } + if *run != "" { + args = append(args, "-test.run", *run) + } + if *short { + args = append(args, "-test.short") + } + if *verbose { + args = append(args, "-test.v") + } + status := "ok " + start := time.Now() + if err := runNode(outfile.Name(), args, pkg.Dir, options.Quiet); err != nil { + if _, ok := err.(*exec.ExitError); !ok { + return err + } + exitErr = err + status = "FAIL" + } + fmt.Printf("%s\t%s\t%.3fs\n", status, pkg.ImportPath, time.Now().Sub(start).Seconds()) + } + return exitErr + }, options, nil)) + } + + cmdTool := &cobra.Command{ + Use: "tool [command] [args...]", + Short: "run specified go tool", + } + cmdTool.Flags().BoolP("e", "e", false, "") + cmdTool.Flags().BoolP("l", "l", false, "") + cmdTool.Flags().BoolP("m", "m", false, "") + cmdTool.Flags().StringP("o", "o", "", "") + cmdTool.Flags().StringP("D", "D", "", "") + cmdTool.Flags().StringP("I", "I", "", "") + cmdTool.Run = func(cmd *cobra.Command, args 
[]string) { + os.Exit(handleError(func() error { + if len(args) == 2 { + switch args[0][1] { + case 'g': + basename := filepath.Base(args[1]) + s := gbuild.NewSession(options) + if err := s.BuildFiles([]string{args[1]}, basename[:len(basename)-3]+".js", currentDirectory); err != nil { + return err + } + return nil + } + } + cmdTool.Help() + return nil + }, options, nil)) + } + + cmdServe := &cobra.Command{ + Use: "serve", + Short: "compile on-the-fly and serve", + } + cmdServe.Flags().AddFlag(flagVerbose) + cmdServe.Flags().AddFlag(flagQuiet) + cmdServe.Flags().AddFlag(flagMinify) + cmdServe.Flags().AddFlag(flagColor) + cmdServe.Flags().AddFlag(flagTags) + var addr string + cmdServe.Flags().StringVarP(&addr, "http", "", ":8080", "HTTP bind address to serve") + cmdServe.Run = func(cmd *cobra.Command, args []string) { + options.BuildTags = strings.Fields(*tags) + dirs := append(filepath.SplitList(build.Default.GOPATH), build.Default.GOROOT) + sourceFiles := http.FileServer(serveCommandFileSystem{options: options, dirs: dirs, sourceMaps: make(map[string][]byte)}) + ln, err := net.Listen("tcp", addr) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + if tcpAddr := ln.Addr().(*net.TCPAddr); tcpAddr.IP.Equal(net.IPv4zero) || tcpAddr.IP.Equal(net.IPv6zero) { // Any available addresses. + fmt.Printf("serving at http://localhost:%d and on port %d of any available addresses\n", tcpAddr.Port, tcpAddr.Port) + } else { // Specific address. + fmt.Printf("serving at http://%s\n", tcpAddr) + } + fmt.Fprintln(os.Stderr, http.Serve(tcpKeepAliveListener{ln.(*net.TCPListener)}, sourceFiles)) + } + + rootCmd := &cobra.Command{ + Use: "gopherjs", + Long: "GopherJS is a tool for compiling Go source code to JavaScript.", + } + rootCmd.AddCommand(cmdBuild, cmdGet, cmdInstall, cmdRun, cmdTest, cmdTool, cmdServe) + rootCmd.Execute() +} + +// tcpKeepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +type tcpKeepAliveListener struct { + *net.TCPListener +} + +func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { + tc, err := ln.AcceptTCP() + if err != nil { + return + } + tc.SetKeepAlive(true) + tc.SetKeepAlivePeriod(3 * time.Minute) + return tc, nil +} + +type serveCommandFileSystem struct { + options *gbuild.Options + dirs []string + sourceMaps map[string][]byte +} + +func (fs serveCommandFileSystem) Open(name string) (http.File, error) { + dir, file := path.Split(name) + base := path.Base(dir) // base is parent folder name, which becomes the output file name. + + isPkg := file == base+".js" + isMap := file == base+".js.map" + isIndex := file == "index.html" + + if isPkg || isMap || isIndex { + // If we're going to be serving our special files, make sure there's a Go command in this folder. 
+ s := gbuild.NewSession(fs.options) + pkg, err := gbuild.Import(path.Dir(name[1:]), 0, s.InstallSuffix(), fs.options.BuildTags) + if err != nil || pkg.Name != "main" { + isPkg = false + isMap = false + isIndex = false + } + + switch { + case isPkg: + buf := bytes.NewBuffer(nil) + browserErrors := bytes.NewBuffer(nil) + exitCode := handleError(func() error { + if err := s.BuildPackage(pkg); err != nil { + return err + } + + sourceMapFilter := &compiler.SourceMapFilter{Writer: buf} + m := &sourcemap.Map{File: base + ".js"} + sourceMapFilter.MappingCallback = gbuild.NewMappingCallback(m, fs.options.GOROOT, fs.options.GOPATH) + + deps, err := compiler.ImportDependencies(pkg.Archive, s.ImportContext.Import) + if err != nil { + return err + } + if err := compiler.WriteProgramCode(deps, sourceMapFilter); err != nil { + return err + } + + mapBuf := bytes.NewBuffer(nil) + m.WriteTo(mapBuf) + buf.WriteString("//# sourceMappingURL=" + base + ".js.map\n") + fs.sourceMaps[name+".map"] = mapBuf.Bytes() + + return nil + }, fs.options, browserErrors) + if exitCode != 0 { + buf = browserErrors + } + return newFakeFile(base+".js", buf.Bytes()), nil + + case isMap: + if content, ok := fs.sourceMaps[name]; ok { + return newFakeFile(base+".js.map", content), nil + } + } + } + + for _, d := range fs.dirs { + f, err := http.Dir(filepath.Join(d, "src")).Open(name) + if err == nil { + return f, nil + } + } + + if isIndex { + // If there was no index.html file in any dirs, supply our own. + return newFakeFile("index.html", []byte(``)), nil + } + + return nil, os.ErrNotExist +} + +type fakeFile struct { + name string + size int + io.ReadSeeker +} + +func newFakeFile(name string, content []byte) *fakeFile { + return &fakeFile{name: name, size: len(content), ReadSeeker: bytes.NewReader(content)} +} + +func (f *fakeFile) Close() error { + return nil +} + +func (f *fakeFile) Readdir(count int) ([]os.FileInfo, error) { + return nil, os.ErrInvalid +} + +func (f *fakeFile) Stat() (os.FileInfo, error) { + return f, nil +} + +func (f *fakeFile) Name() string { + return f.name +} + +func (f *fakeFile) Size() int64 { + return int64(f.size) +} + +func (f *fakeFile) Mode() os.FileMode { + return 0 +} + +func (f *fakeFile) ModTime() time.Time { + return time.Time{} +} + +func (f *fakeFile) IsDir() bool { + return false +} + +func (f *fakeFile) Sys() interface{} { + return nil +} + +// If browserErrors is non-nil, errors are written for presentation in browser. +func handleError(f func() error, options *gbuild.Options, browserErrors *bytes.Buffer) int { + switch err := f().(type) { + case nil: + return 0 + case compiler.ErrorList: + for _, entry := range err { + printError(entry, options, browserErrors) + } + return 1 + case *exec.ExitError: + return err.Sys().(syscall.WaitStatus).ExitStatus() + default: + printError(err, options, browserErrors) + return 1 + } +} + +// sprintError returns an annotated error string without trailing newline. +func sprintError(err error) string { + makeRel := func(name string) string { + if relname, err := filepath.Rel(currentDirectory, name); err == nil { + return relname + } + return name + } + + switch e := err.(type) { + case *scanner.Error: + return fmt.Sprintf("%s:%d:%d: %s", makeRel(e.Pos.Filename), e.Pos.Line, e.Pos.Column, e.Msg) + case types.Error: + pos := e.Fset.Position(e.Pos) + return fmt.Sprintf("%s:%d:%d: %s", makeRel(pos.Filename), pos.Line, pos.Column, e.Msg) + default: + return fmt.Sprintf("%s", e) + } +} + +// printError prints err to Stderr with options. 
If browserErrors is non-nil, errors are also written for presentation in browser. +func printError(err error, options *gbuild.Options, browserErrors *bytes.Buffer) { + e := sprintError(err) + options.PrintError("%s\n", e) + if browserErrors != nil { + fmt.Fprintln(browserErrors, `console.error("`+template.JSEscapeString(e)+`");`) + } +} + +func runNode(script string, args []string, dir string, quiet bool) error { + var allArgs []string + if b, _ := strconv.ParseBool(os.Getenv("SOURCE_MAP_SUPPORT")); os.Getenv("SOURCE_MAP_SUPPORT") == "" || b { + allArgs = []string{"--require", "source-map-support/register"} + if err := exec.Command("node", "--require", "source-map-support/register", "--eval", "").Run(); err != nil { + if !quiet { + fmt.Fprintln(os.Stderr, "gopherjs: Source maps disabled. Use Node.js 4.x with source-map-support module for nice stack traces.") + } + allArgs = []string{} + } + } + + if runtime.GOOS != "windows" { + allArgs = append(allArgs, "--stack_size=10000", script) + } + + allArgs = append(allArgs, args...) + + node := exec.Command("node", allArgs...) + node.Dir = dir + node.Stdin = os.Stdin + node.Stdout = os.Stdout + node.Stderr = os.Stderr + err := node.Run() + if _, ok := err.(*exec.ExitError); err != nil && !ok { + err = fmt.Errorf("could not run Node.js: %s", err.Error()) + } + return err +} + +type testFuncs struct { + Tests []testFunc + Benchmarks []testFunc + Examples []testFunc + Package *build.Package + NeedTest bool + NeedXtest bool +} + +type testFunc struct { + Package string // imported package name (_test or _xtest) + Name string // function name + Output string // output, for examples +} + +var testmainTmpl = template.Must(template.New("main").Parse(` +package main + +import ( + "regexp" + "testing" + +{{if .NeedTest}} + _test {{.Package.ImportPath | printf "%q"}} +{{end}} +{{if .NeedXtest}} + _xtest {{.Package.ImportPath | printf "%s_test" | printf "%q"}} +{{end}} +) + +var tests = []testing.InternalTest{ +{{range .Tests}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var benchmarks = []testing.InternalBenchmark{ +{{range .Benchmarks}} + {"{{.Name}}", {{.Package}}.{{.Name}}}, +{{end}} +} + +var examples = []testing.InternalExample{ +{{range .Examples}} + {"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}}, +{{end}} +} + +var matchPat string +var matchRe *regexp.Regexp + +func matchString(pat, str string) (result bool, err error) { + if matchRe == nil || matchPat != pat { + matchPat = pat + matchRe, err = regexp.Compile(matchPat) + if err != nil { + return + } + } + return matchRe.MatchString(str), nil +} + +func main() { + testing.Main(matchString, tests, benchmarks, examples) +} + +`)) diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/.gitattributes b/vendor/github.com/graphite-ng/carbon-relay-ng/.gitattributes new file mode 100644 index 0000000000..a0ebf03eaf --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/.gitattributes @@ -0,0 +1 @@ +/ui/web/bindata.go binary diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/.gitignore b/vendor/github.com/graphite-ng/carbon-relay-ng/.gitignore new file mode 100644 index 0000000000..db912e5010 --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/.gitignore @@ -0,0 +1,9 @@ +*.deb +*.swp +/carbon-relay-ng +/carbon-relay-ng.ini +/carbon-relay-ng.pid +/carbon-relay-ng.test +test_spool +spool/*.dat +*~ diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/Dockerfile b/vendor/github.com/graphite-ng/carbon-relay-ng/Dockerfile new file mode 100644 
index 0000000000..76c69d46d8 --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/Dockerfile @@ -0,0 +1,8 @@ +FROM gliderlabs/alpine +RUN apk --update add --no-cache ca-certificates +ADD carbon-relay-ng /bin/ +VOLUME /conf +ADD examples/carbon-relay-ng-docker.ini /conf/carbon-relay-ng.ini +RUN mkdir /var/spool/carbon-relay-ng +ENTRYPOINT ["/bin/carbon-relay-ng"] +CMD ["/conf/carbon-relay-ng.ini"] diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/LICENSE b/vendor/github.com/graphite-ng/carbon-relay-ng/LICENSE new file mode 100644 index 0000000000..363fa9ee77 --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/LICENSE @@ -0,0 +1,29 @@ +Copyright 2012 Richard Crowley. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + +THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of Richard Crowley. diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/Makefile b/vendor/github.com/graphite-ng/carbon-relay-ng/Makefile new file mode 100644 index 0000000000..af8e5f4052 --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/Makefile @@ -0,0 +1,140 @@ +VERSION=$(shell git describe --tags --always | sed 's/^v//') + + +build: + cd ui/web && go-bindata -pkg web admin_http_assets + find . -name '*.go' | grep -v '^\.\/vendor' | xargs gofmt -w -s + CGO_ENABLED=0 go build -ldflags "-X main.Version=$(VERSION)" ./cmd/carbon-relay-ng + +test: + go test ./... + +docker: build + docker build --tag=raintank/carbon-relay-ng:latest . 
+ docker tag raintank/carbon-relay-ng raintank/carbon-relay-ng:$(VERSION) + +all: + +deb: build + mkdir -p build/deb-systemd + install -d debian/usr/bin debian/usr/share/man/man1 debian/etc/carbon-relay-ng debian/lib/systemd/system debian/var/run/carbon-relay-ng debian/usr/lib/tmpfiles.d + install carbon-relay-ng debian/usr/bin + install examples/carbon-relay-ng.ini debian/etc/carbon-relay-ng/carbon-relay-ng.conf + install examples/carbon-relay-ng-tmpfiles.conf debian/usr/lib/tmpfiles.d/carbon-relay-ng.conf + install examples/carbon-relay-ng.service debian/lib/systemd/system + install man/man1/carbon-relay-ng.1 debian/usr/share/man/man1 + gzip debian/usr/share/man/man1/carbon-relay-ng.1 + fpm \ + -s dir \ + -t deb \ + -n carbon-relay-ng \ + -v $(VERSION)-1 \ + -a native \ + --config-files etc/carbon-relay-ng/carbon-relay-ng.conf \ + -p build/deb-systemd/carbon-relay-ng-VERSION_ARCH.deb \ + -m "Dieter Plaetinck " \ + --description "Fast carbon relay+aggregator with admin interfaces for making changes online" \ + --license BSD \ + --url https://github.com/graphite-ng/carbon-relay-ng \ + --after-install examples/after_install.sh \ + -C debian . + rm -rf debian + +deb-upstart: build + mkdir build/deb-upstart + install -d debian/usr/bin debian/usr/share/man/man1 debian/etc/carbon-relay-ng + install carbon-relay-ng debian/usr/bin + install examples/carbon-relay-ng.ini debian/etc/carbon-relay-ng/carbon-relay-ng.conf + install man/man1/carbon-relay-ng.1 debian/usr/share/man/man1 + gzip debian/usr/share/man/man1/carbon-relay-ng.1 + fpm \ + -s dir \ + -t deb \ + -n carbon-relay-ng \ + -v $(VERSION)-1 \ + -a native \ + --config-files etc/carbon-relay-ng/carbon-relay-ng.conf \ + -p build/deb-upstart/carbon-relay-ng-VERSION_ARCH.deb \ + --deb-upstart examples/carbon-relay-ng.upstart \ + -m "Dieter Plaetinck " \ + --description "Fast carbon relay+aggregator with admin interfaces for making changes online" \ + --license BSD \ + --url https://github.com/graphite-ng/carbon-relay-ng \ + -C debian . + rm -rf debian + +rpm: build + mkdir -p build/centos-7 + install -d redhat/usr/bin redhat/usr/share/man/man1 redhat/etc/carbon-relay-ng redhat/lib/systemd/system redhat/var/run/carbon-relay-ng + install carbon-relay-ng redhat/usr/bin + install man/man1/carbon-relay-ng.1 redhat/usr/share/man/man1 + install examples/carbon-relay-ng.ini redhat/etc/carbon-relay-ng/carbon-relay-ng.conf + install examples/carbon-relay-ng.service redhat/lib/systemd/system + gzip redhat/usr/share/man/man1/carbon-relay-ng.1 + fpm \ + -s dir \ + -t rpm \ + -n carbon-relay-ng \ + -v $(VERSION) \ + --epoch 1 \ + -a native \ + --config-files etc/carbon-relay-ng/carbon-relay-ng.conf \ + -p build/centos-7/carbon-relay-ng-VERSION.el7.ARCH.rpm \ + -m "Dieter Plaetinck " \ + --description "Fast carbon relay+aggregator with admin interfaces for making changes online" \ + --license BSD \ + --url https://github.com/graphite-ng/carbon-relay-ng \ + --after-install examples/after_install.sh \ + -C redhat . 
+ rm -rf redhat
+
+rpm-centos6: build
+ mkdir build/centos-6
+ install -d redhat/usr/bin redhat/usr/share/man/man1 redhat/etc/carbon-relay-ng redhat/etc/init
+ install carbon-relay-ng redhat/usr/bin
+ install man/man1/carbon-relay-ng.1 redhat/usr/share/man/man1
+ install examples/carbon-relay-ng.ini redhat/etc/carbon-relay-ng/carbon-relay-ng.conf
+ install examples/carbon-relay-ng.upstart-0.6.5 redhat/etc/init/carbon-relay-ng.conf
+ gzip redhat/usr/share/man/man1/carbon-relay-ng.1
+ fpm \
+ -s dir \
+ -t rpm \
+ -n carbon-relay-ng \
+ -v $(VERSION) \
+ --epoch 1 \
+ -a native \
+ --config-files etc/carbon-relay-ng/carbon-relay-ng.conf \
+ -p build/centos-6/carbon-relay-ng-VERSION.el6.ARCH.rpm \
+ -m "Dieter Plaetinck " \
+ --description "Fast carbon relay+aggregator with admin interfaces for making changes online" \
+ --license BSD \
+ --url https://github.com/graphite-ng/carbon-relay-ng \
+ -C redhat .
+ rm -rf redhat
+
+packages: deb deb-upstart rpm rpm-centos6
+
+gh-pages: man
+ mkdir -p gh-pages
+ find man -name \*.html | xargs -I__ mv __ gh-pages/
+ git checkout -q gh-pages
+ cp -R gh-pages/* ./
+ rm -rf gh-pages
+ git add .
+ git commit -m "Rebuilt manual."
+ git push origin gh-pages
+ git checkout -q master
+
+install: build
+ go install
+
+man:
+ find man -name \*.ronn | xargs -n1 ronn --manual=carbon-relay-ng --style=toc
+
+run: build
+ ./carbon-relay-ng carbon-relay-ng.ini
+
+run-docker:
+ docker run --rm -p 2003:2003 -p 2004:2004 -p 8081:8081 -v $(pwd)/examples:/conf -v $(pwd)/spool:/spool raintank/carbon-relay-ng
+
+.PHONY: all deb gh-pages install man test build
diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/README.md b/vendor/github.com/graphite-ng/carbon-relay-ng/README.md
new file mode 100644
index 0000000000..997cec7e13
--- /dev/null
+++ b/vendor/github.com/graphite-ng/carbon-relay-ng/README.md
@@ -0,0 +1,280 @@
+[![Circle CI](https://circleci.com/gh/graphite-ng/carbon-relay-ng.svg?style=shield)](https://circleci.com/gh/graphite-ng/carbon-relay-ng)
+[![Go Report Card](https://goreportcard.com/badge/github.com/graphite-ng/carbon-relay-ng)](https://goreportcard.com/report/github.com/graphite-ng/carbon-relay-ng)
+[![GoDoc](https://godoc.org/github.com/graphite-ng/carbon-relay-ng?status.svg)](https://godoc.org/github.com/graphite-ng/carbon-relay-ng)
+
+carbon-relay-ng
+===============
+
+A relay for carbon streams, in Go.
+Like carbon-relay from the graphite project, except it:
+
+ * performs better: should be able to do about 100k ~ 1M metrics per second depending on configuration and CPU speed.
+ * lets you adjust the routing table at runtime, in real time, using the web or telnet interface (though they may have some rough edges)
+ * has aggregator functionality built-in for cross-series, cross-time and cross-time-and-series aggregations.
+ * supports plaintext and pickle graphite routes (output) and metrics2.0/grafana.net, as well as kafka.
+ * graphite routes support a per-route spooling policy.
+   (i.e. in case of an endpoint outage, we can temporarily queue the data up to disk and resume later)
+ * performs validation on all incoming metrics (see below)
+ * supported inputs: plaintext, pickle and AMQP (rabbitmq)
+
+This makes it easy to fan out to other tools that feed on the metrics.
+Or balance/split load, or provide redundancy, or partition the data, etc.
+This pattern allows alerting and event processing systems to act on the data as it is received (which is much better than repeatedly reading from your storage).
+
+
+![screenshot](https://raw.githubusercontent.com/graphite-ng/carbon-relay-ng/master/screenshots/screenshot.png)
+
+
+Limitations
+-----------
+
+* regex rewriter rules do not support limiting the number of replacements; max must be set to -1
+* the web UI is not always reliable for making changes; the config file and TCP interface are safer and more complete anyway.
+* internal metrics *must* be routed somewhere (e.g. into the relay itself), otherwise it'll [leak memory](https://github.com/graphite-ng/carbon-relay-ng/issues/50).
+  This is a silly bug but I haven't had time yet to fix it.
+
+
+Releases & versions
+-------------------
+
+See [https://github.com/graphite-ng/carbon-relay-ng/releases](https://github.com/graphite-ng/carbon-relay-ng/releases)
+
+
+Instrumentation
+---------------
+
+* Extensive performance variables are available in JSON at http://localhost:8081/debug/vars2 (update the port if you change it in the config)
+* You can also send metrics to graphite (or feed them back into the relay), see the config.
+* Comes with a [grafana dashboard](https://github.com/graphite-ng/carbon-relay-ng/blob/master/grafana-dashboard.json) which you can also [download from the grafana dashboards site](https://grafana.com/dashboards/338)
+
+![grafana dashboard](https://raw.githubusercontent.com/graphite-ng/carbon-relay-ng/master/screenshots/grafana-screenshot.png)
+
+
+Installation
+------------
+
+You can install packages from the [raintank packagecloud repository](https://packagecloud.io/raintank/raintank).
+We automatically build packages for Ubuntu 14.04 (trusty), 16.04 (xenial), debian 8 (jessie), Centos6 and Centos7 when builds in CircleCI succeed.
+[Instructions for enabling the repository](https://packagecloud.io/raintank/raintank/install)
+
+You can also just build a binary (see below) and run it with a config file like so:
+
+carbon-relay-ng [-cpuprofile cpuprofile-file] config-file
+
+
+Building
+--------
+
+Requires Go 1.4 or higher.
+We use https://github.com/kardianos/govendor to manage vendoring of 3rd party libraries.
+
+    export GOPATH=/some/path/
+    export PATH="$PATH:$GOPATH/bin"
+    go get -d github.com/graphite-ng/carbon-relay-ng
+    go get github.com/jteeuwen/go-bindata/...
+    cd "$GOPATH/src/github.com/graphite-ng/carbon-relay-ng"
+    # optional: check out an older version: git checkout v0.5
+    make
+
+
+Concepts
+--------
+
+You have 1 master routing table. This table contains 0-N routes. Each carbon route can contain 0-M destinations (tcp endpoints).
+
+First: "matching": you can match metrics on one or more of: prefix, substring, or regex. All 3 default to "" (empty string, i.e. allow all).
+The conditions are AND-ed. Regexes are more resource intensive and hence should - and often can be - avoided.
+
+* All incoming metrics are validated and go into the table when valid.
+* The table will then check metrics against the blacklist and discard them when appropriate.
+* Then metrics pass through the rewriters and are modified if applicable. Rewrite rules wrapped with forward slashes are interpreted as regular expressions.
+* The table sends the metric to:
+  * the aggregators, who match the metrics against their rules, compute aggregations and feed results back into the table. See the Aggregation section below for details.
+  * any route that matches
+* The route can have different behaviors, based on its type:
+
+  * for grafanaNet / kafkaMdm routes, there is only a single endpoint so that's where the data goes. For standard/carbon routes you can control how data gets routed into destinations:
+    * sendAllMatch: send all metrics to all the defined endpoints (possibly, and commonly, only 1 endpoint).
+    * sendFirstMatch: send each metric to the first endpoint that matches it.
+    * consistentHashing: the algorithm is the same as Carbon's consistent hashing.
+    * round robin: the route is a RR pool (not implemented)
+
+
+carbon-relay-ng (for now) focuses on staying up and not consuming many resources.
+
+For carbon routes:
+if the connection is up but slow, we drop the data;
+if the connection is down and spooling is enabled, we try to spool, but if it's slow we drop the data;
+if the connection is down and spooling is disabled -> drop the data.
+
+kafka and grafanaNet have an in-memory buffer and can be configured in blocking or non-blocking mode for when the buffer runs full.
+
+
+Input
+-----
+
+As with the Python implementation of carbon-relay, metrics can be pushed to carbon-relay-ng via TCP
+(plain text or pickle) or by using an AMQP broker such as RabbitMQ. To send metrics via AMQP, create
+a topic exchange (named "metrics" in the example carbon-relay-ng.ini) and publish messages to it in
+the usual metric format: ` `. An exclusive, ephemeral
+queue will automatically be created and bound to the exchange, which carbon-relay-ng will consume from.
+
+
+Validation
+----------
+
+All incoming metrics undergo some basic sanity checks before the metrics go into the routing table. We always check the following:
+
+* the value parses to an int or float
+* the timestamp is a unix timestamp
+
+By default, we also apply the following checks to the metric name:
+
+* has 3 fields
+* the key has no characters besides `a-z A-Z _ - . =` (fairly strict, but graphite causes problems with various other characters)
+* has no empty node (like field1.field2..field4)
+
+However, for legacy metrics, the `legacy_metric_validation` configuration parameter can be used to loosen the metric name checks. This can be useful when you need to forward metrics whose names you do not control.
+The following are valid values for the `legacy_metric_validation` field:
+
+* `strict` -- All checks described above are in force. This is the default.
+* `medium` -- We validate that the metric name has only ASCII characters and no embedded NULLs.
+* `none` -- No metric name checks are performed.
+
+If we detect that the metric is in metrics2.0 format, we also check for proper formatting, and that unit and mtype are set.
+
+Invalid metrics are dropped and can be seen at /badMetrics/timespec.json where timespec is something like 30s, 10m, 24h, etc.
+(the counters are also exported; see the Instrumentation section)
+
+You can also validate that, for each series, each point is newer than the previous one, using the validate_order option. This is helpful for some backends like grafana.net.
+
+Aggregation
+-----------
+
+As discussed in concepts above, we can combine, at each point in time, the points of multiple series into a new series.
+Note:
+* The interval parameter lets you quantize ("fix") timestamps: for example, with an interval of 60 seconds, if you have incoming metrics for times that differ from each other but all fall within the same minute, they will be counted together.
+* The wait parameter allows waiting up to the specified number of seconds for values. With a wait of 120, metrics can come 2 minutes late and still be included in the aggregation results.
+* The fmt parameter dictates what the metric key of the aggregated metric will be. Use $1, $2, etc. to refer to groups in the regex.
+* Note that we direct incoming values to an aggregation bucket based on the interval the timestamp is in, and the output key it generates.
+  This means that you can have 3 aggregation cases, based on how you set your regex, interval and fmt string.
+  - aggregation of points with different metric keys, but with the same (or similar) timestamps, into one outgoing value (~ carbon-aggregator):
+    this is the case if you set the interval to the period between each incoming packet of a given key, and the fmt yields the same key for different input metric keys.
+  - aggregation of individual metrics, i.e. packets for the same key, with different timestamps. For example, if you receive values for the same key every second, you can aggregate into minutely buckets by setting interval to 60, and have the fmt yield a unique key for every input metric key. (~ graphite rollups)
+  - the combination: compute aggregates from values seen with different keys, and at multiple points in time.
+* functions currently available: avg, delta, derive, last, max, min, stdev, sum
+* aggregation output is routed via the routing table just like all other metrics. Note that aggregation output will never go back into aggregators (to prevent loops) and also bypasses the validation, blacklist and rewriters.
+* see the included ini for examples
+* each aggregator can be configured to cache regex matches or not. There is no cache size limit, because a limited size, under a typical workload where we see each metric key sequentially, in perpetual cycles, would just result in cache thrashing and wasted memory. If enabled, all matches are cached for at least 100 times the wait parameter. By default, the cache is enabled for aggregators set up via commands (init commands in the config) but disabled for aggregators configured via config sections (due to a limitation in our config library). Basically, enabling the cache means you trade RAM for CPU.
+
+Rewriting
+---------
+
+Series names can be rewritten as they pass through the system by rewriter rules, which are processed in series.
+
+Basic rules use simple old/new text replacement, and support a Max parameter to specify the maximum number of matched items to be replaced.
+
+Rewriter rules also support regexp syntax, which is enabled by wrapping the "old" parameter with forward slashes and setting "max" to -1.
+
+Regexp rules support [golang's standard regular expression syntax](https://golang.org/pkg/regexp/syntax/), and the "new" value can include [submatch identifiers](https://golang.org/pkg/regexp/#Regexp.Expand) in the format `${1}`.
+
+Examples (using init commands; you can also specify them in the config directly, see the included config):
+
+```
+# basic rewriter rule to replace first occurrence of "foo" with "bar"
+addRewriter foo bar 1
+
+# regexp rewriter rule to add a prefix of "prefix." to all series
+addRewriter /^/ prefix. -1
+
+# regexp rewriter rule to replace "server.X" with "servers.X.collectd"
+addRewriter /server\.([^.]+)/ servers.${1}.collectd -1
+```
+
+
+Configuration
+-------------
+
+Take a look at the included carbon-relay-ng.ini, which includes comments describing the available options.
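+
+For illustration, here is a hypothetical aggregation rule in init-command form, tying together the regex, fmt, interval and wait parameters from the Aggregation section above (the metric keys are invented, and the argument order follows the addAgg help in the TCP interface section below):
+
+```
+# sum per-server cpu usage into one total series, in minutely buckets,
+# waiting up to 2 minutes for late data
+addAgg sum ^servers\.([^.]+)\.cpu\.usage$ totals.cpu.usage 60 120
+```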
+
+The major config sections are the `blacklist` array, and the `[[aggregation]]`, `[[rewriter]]` and `[[route]]` entries.
+
+[Overview of all routes/destinations config options and tuning options](https://github.com/graphite-ng/carbon-relay-ng/blob/master/docs/routes.md)
+
+You can also create routes, populate the blacklist, etc. via the `init` config array, using the same commands as the telnet interface, detailed below.
+
+TCP interface
+-------------
+
+commands:
+
+ help show this menu
+ view view full current routing table
+
+ addBlack blacklist (drops matching metrics as soon as they are received)
+
+ addRewriter add rewriter that will rewrite all old to new, max times
+ use /old/ to specify a regular expression match, with support for ${1} style identifiers in new
+
+ addAgg [cache=true/false] add a new aggregation rule.
+ : aggregation function to use
+ avg
+ delta
+ derive
+ max
+ min
+ stdev
+ sum
+ regex to match incoming metrics. supports groups (numbered, see fmt)
+ format of output metric. you can use $1, $2, etc. to refer to numbered groups
+ align odd timestamps of metrics into buckets by this interval in seconds.
+ number of seconds to wait for "late" metric messages before computing and flushing the final result.
+
+
+ addRoute [opts] [[...]] add a new route. note 2 spaces to separate destinations
+ :
+ sendAllMatch send metrics in the route to all destinations
+ sendFirstMatch send metrics in the route to the first one that matches it
+ consistentHashing distribute metrics between destinations using a hash algorithm
+ :
+ prefix= only take in metrics that have this prefix
+ sub= only take in metrics that match this substring
+ regex= only take in metrics that match this regex (expensive!)
+ :
+ a tcp endpoint. i.e. ip:port or hostname:port
+ for consistentHashing routes, an instance identifier can also be present:
+ hostname:port:instance
+ The instance is used to disambiguate multiple endpoints on the same host, as the Carbon-compatible consistent hashing algorithm does not take the port into account.
+ :
+ prefix= only take in metrics that have this prefix
+ sub= only take in metrics that match this substring
+ regex= only take in metrics that match this regex (expensive!)
+ flush= flush interval in ms
+ reconn= reconnection interval in ms
+ pickle={true,false} pickle output format instead of the default text protocol
+ spool={true,false} enable spooling for this endpoint
+ connbuf= connection buffer (how many metrics can be queued, not written into network conn). default 30k
+ iobuf= buffered io connection buffer in bytes. default: 2M
+ spoolbuf= num of metrics to buffer across disk-write stalls. practically, tune this to number of metrics in a second. default: 10000
+ spoolmaxbytesperfile= max filesize for spool files. default: 200MiB (200 * 1024 * 1024)
+ spoolsyncevery= sync spool to disk every this many metrics. default: 10000
+ spoolsyncperiod= sync spool to disk every this many milliseconds. default 1000
+ spoolsleep= sleep this many microseconds(!) in between ingests from bulkdata/redo buffers into spool. default 500
+ unspoolsleep= sleep this many microseconds(!) in between reads from the spool, when replaying spooled data.
default 10
+
+
+
+ addDest not implemented yet
+
+ modDest : modify dest by updating one or more space separated option strings
+ addr= new tcp address
+ prefix= new matcher prefix
+ sub= new matcher substring
+ regex= new matcher regex
+
+ modRoute : modify route by updating one or more space separated option strings
+ prefix= new matcher prefix
+ sub= new matcher substring
+ regex= new matcher regex
+
+ delRoute delete given route
diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/circle.yml b/vendor/github.com/graphite-ng/carbon-relay-ng/circle.yml
new file mode 100644
index 0000000000..40a8f50d12
--- /dev/null
+++ b/vendor/github.com/graphite-ng/carbon-relay-ng/circle.yml
@@ -0,0 +1,50 @@
+machine:
+  environment:
+    GOPATH: "$HOME/go"
+    BASE: "$GOPATH/src/github.com/$CIRCLE_PROJECT_USERNAME"
+    REPO: "$BASE/$CIRCLE_PROJECT_REPONAME"
+    PATH: "$PATH:$GOPATH/bin"
+  services:
+    - docker
+
+# CircleCI by default checks out the project to $HOME/carbon-relay-ng
+# we need to move it to its gopath-ized location so that imports work
+# we also symlink because:
+# * even with our cd overrides, it starts by cd'ing into the default, showing ugly (but harmless) errors
+# * it lets us just refer to things like 'build' directory, which will point to the right place
+
+checkout:
+  post:
+    - cd $HOME && mkdir -p $BASE && mv carbon-relay-ng $BASE/ && ln -s $REPO carbon-relay-ng
+
+dependencies:
+  pre:
+    - gem install package_cloud
+    - gem install fpm
+    - sudo apt-get install rpm
+    - go get github.com/jteeuwen/go-bindata/...
+test:
+  override:
+    - cd $REPO && go test -v -race $(go list ./... | grep -v /vendor/)
+  post:
+    - cd $REPO && make packages docker # note, this runs go-bindata again
+    # ideally we'd make sure that if go-bindata generated new content, we fail,
+    # because devs should have run go-bindata and checked in changes already.
+    # but because git checkout will change the timestamps and mode, it'll generate slight differences, and it's hard to filter those out.
+    # so for now, we just let it slip and assume devs will do a good enough job maintaining the generated data
+    # - git diff --exit-code
+general:
+  artifacts:
+    - build
+deployment:
+  production:
+    branch: master
+    commands:
+      # Ubuntu 14.04 (trusty), 16.04 (xenial), debian 8 (jessie), Centos6 and Centos7. (no debian 7 wheezy because that's sysvinit, which we don't have packages for)
+      - package_cloud push ${PACKAGECLOUD_REPO}/ubuntu/trusty build/deb-upstart/carbon-relay-ng-*.deb
+      - package_cloud push ${PACKAGECLOUD_REPO}/ubuntu/xenial build/deb-systemd/carbon-relay-ng-*.deb
+      - package_cloud push ${PACKAGECLOUD_REPO}/debian/jessie build/deb-systemd/carbon-relay-ng-*.deb
+      - package_cloud push ${PACKAGECLOUD_REPO}/el/6 build/centos-6/carbon-relay-ng-*.el6.*.rpm
+      - package_cloud push ${PACKAGECLOUD_REPO}/el/7 build/centos-7/carbon-relay-ng-*.el7.*.rpm
+      - docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_KEY
+      - ./deploy-docker.sh
diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/clock/clock.go b/vendor/github.com/graphite-ng/carbon-relay-ng/clock/clock.go
new file mode 100644
index 0000000000..a0a6e541f7
--- /dev/null
+++ b/vendor/github.com/graphite-ng/carbon-relay-ng/clock/clock.go
@@ -0,0 +1,26 @@
+package clock
+
+import "time"
+
+// AlignedTick returns a tick channel so that, say the interval is a second,
+// then it will tick at every whole second, or if it's 60s then it's every whole
+// minute. Note that in my testing this is about .0001 to 0.0002 seconds later due
+// to scheduling etc.
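+//
+// A hypothetical usage sketch (flush is a placeholder for whatever the caller
+// wants to do on each aligned tick):
+//
+//	for t := range AlignedTick(time.Minute) {
+//		flush(t) // runs just after each whole-minute boundary
+//	}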
+func AlignedTick(period time.Duration) <-chan time.Time { + // note that time.Ticker is not an interface, + // and that if we instantiate one, we can't write to its channel + // hence we can't leverage that type. + c := make(chan time.Time) + go func() { + for { + unix := time.Now().UnixNano() + diff := time.Duration(period - (time.Duration(unix) % period)) + time.Sleep(diff) + select { + case c <- time.Now(): + default: + } + } + }() + return c +} diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/deploy-docker.sh b/vendor/github.com/graphite-ng/carbon-relay-ng/deploy-docker.sh new file mode 100755 index 0000000000..3b43711de2 --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/deploy-docker.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +VERSION=$(git describe --tags --always | sed 's/^v//') + +echo docker push raintank/carbon-relay-ng:$VERSION +docker push raintank/carbon-relay-ng:$VERSION + +echo docker push raintank/carbon-relay-ng:latest +docker push raintank/carbon-relay-ng:latest diff --git a/vendor/github.com/graphite-ng/carbon-relay-ng/grafana-dashboard.json b/vendor/github.com/graphite-ng/carbon-relay-ng/grafana-dashboard.json new file mode 100644 index 0000000000..ba694bacb0 --- /dev/null +++ b/vendor/github.com/graphite-ng/carbon-relay-ng/grafana-dashboard.json @@ -0,0 +1,1008 @@ +{ + "__inputs": [ + { + "name": "DS_GRAPHITE", + "label": "graphite", + "description": "", + "type": "datasource", + "pluginId": "graphite", + "pluginName": "Graphite" + } + ], + "__requires": [ + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "3.1.1" + }, + { + "type": "datasource", + "id": "graphite", + "name": "Graphite", + "version": "1.0.0" + } + ], + "id": null, + "title": "Carbon-relay-ng", + "tags": [ + "telemetry" + ], + "style": "dark", + "timezone": "browser", + "editable": true, + "hideControls": false, + "sharedCrosshair": false, + "rows": [ + { + "collapse": false, + "editable": true, + "height": "150px", + "panels": [ + { + "aliasColors": { + "blacklist": "#E24D42", + "direction_is_blacklist": "#E0752D", + "direction_is_in": "#3F6833", + "direction_is_unroutable": "#890F02", + "in": "#3F6833", + "unroutable": "#890F02" + }, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 1, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "aliasByNode(perSecond(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.unit_is_Metric.*), 4)" + }, + { + "hide": false, + "refId": "B", + "target": "alias(perSecond(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.unit_is_Err.type_is_invalid), 'invalid')" + }, + { + "hide": false, + "refId": "C", + "target": "alias(perSecond(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.unit_is_Err.type_is_out_of_order), 'out-of-order')" + } + ], + "timeFrom": null, + "timeShift": 
null, + "title": "$instance incoming", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "short", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 4, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "hide": false, + "refId": "A", + "target": "alias(perSecond(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.dest_is_$dest.unit_is_Metric.direction_is_out), 'to tcp')" + }, + { + "hide": false, + "refId": "B", + "target": "alias(perSecond(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.spool_is_$dest.unit_is_Metric.status_is_incomingRT), 'to spool')" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "dest metric directions", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "short", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "Row1" + }, + { + "collapse": false, + "editable": true, + "height": "150px", + "panels": [ + { + "aliasColors": { + "reason_is_bad_pickle": "#CCA300", + "reason_is_conn_down_no_spool": "#962D82", + "reason_is_slow_conn": "#E0752D", + "reason_is_slow_spool": "#BF1B00" + }, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 3, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "aliasByNode(derivative(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.dest_is_$dest.unit_is_Metric.action_is_drop.*),6)" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "dest metric drops", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "short", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + }, + { + 
"aliasColors": { + "type_is_truncated": "#890F02", + "type_is_write": "#C15C17" + }, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 6, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "aliasByNode(perSecond(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.dest_is_$dest.unit_is_Err.*), 5)" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "dest Errors", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "short", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + }, + { + "collapse": false, + "editable": true, + "height": "250px", + "panels": [ + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 8, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "sortDesc": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_ticker.stat_is_max, 'ticker max')" + }, + { + "refId": "B", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_ticker.stat_is_max_99, 'ticker 99')" + }, + { + "refId": "C", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_ticker.stat_is_mean,'ticker mean')" + }, + { + "refId": "D", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max, 'manual max')" + }, + { + "refId": "E", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_max_99, 'manual 99')" + }, + { + "refId": "F", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_manual.stat_is_mean, 'manual mean')" + }, + { + "refId": "G", + "target": 
"alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_overflow.stat_is_max,'overflow max')" + }, + { + "refId": "H", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_overflow.stat_is_max99, 'overflow 99')" + }, + { + "refId": "I", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_B.what_is_FlushSize.type_is_overflow.mean,\"overflow mean\")" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "conn Flush size", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "bytes", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "metrics-buffered": "#E0752D" + }, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 3, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)", + "thresholdLine": false + }, + "id": 6, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": false, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 2, + "points": true, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "/bufferSize/", + "color": "#890F02", + "fill": 0, + "lines": true, + "linewidth": 3, + "points": false + }, + { + "alias": "memory-usage", + "lines": true, + "linewidth": 0, + "points": false, + "yaxis": 2 + }, + { + "alias": "allocated memory", + "lines": true, + "linewidth": 0, + "points": false, + "yaxis": 2 + } + ], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_Metric.what_is_numBuffered, 'numBuffered')" + }, + { + "refId": "B", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.dest_is_$dest.unit_is_Metric.what_is_bufferSize, 'bufferSize')" + }, + { + "refId": "C", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.what_is_virtual_memory.unit_is_Byte, 'memory-usage')" + }, + { + "refId": "D", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.what_is_memory_allocated.unit_is_Byte, 'allocated memory')" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "conn metrics in buffer", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "short", + "max": null, + "min": null, + "show": true + }, + { + "format": "bytes", + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": { + "\"overflow max\"": "#EF843C", + "\"overflow mean\"": "#58140C", + "\"ticker max\"": "#6ED0E0", + "\"ticker mean\"": "#0A50A1" + }, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + 
"threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 5, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "percentage": false, + "pointradius": 1, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "hide": false, + "refId": "A", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.dest_is_$dest.what_is_durationFlush.type_is_ticker.stat_is_max, '\"ticker max\"')" + }, + { + "hide": false, + "refId": "B", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.dest_is_$dest.what_is_durationFlush.type_is_ticker.stat_is_mean, '\"ticker mean\"')" + }, + { + "hide": false, + "refId": "C", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.dest_is_$dest.what_is_durationFlush.type_is_overflow.stat_is_max, '\"overflow max\"')" + }, + { + "hide": false, + "refId": "D", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.dest_is_$dest.what_is_durationFlush.type_is_overflow.stat_is_mean, '\"overflow mean\"')" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "conn flush durations", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "ns", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + }, + { + "collapse": false, + "editable": true, + "height": "150px", + "panels": [ + { + "aliasColors": { + "status_is_incomingBulk": "#3F2B5B", + "status_is_incomingRT": "#0A50A1" + }, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 10, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "aliasByNode(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.spool_is_$dest.unit_is_Metric.*, 5)" + }, + { + "refId": "B", + "target": "" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "spool inputs", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "short", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 10, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 
0.22)" + }, + "id": 9, + "legend": { + "alignAsTable": false, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "hide": false, + "refId": "A", + "target": "alias(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.spool_is_$dest.unit_is_Metric.status_is_buffered, 'buffered')" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "spool metrics in buffer", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "none", + "label": "", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + }, + { + "aliasColors": {}, + "bars": false, + "datasource": "${DS_GRAPHITE}", + "editable": true, + "error": false, + "fill": 0, + "grid": { + "threshold1": null, + "threshold1Color": "rgba(216, 200, 27, 0.27)", + "threshold2": null, + "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "id": 11, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "span": 4, + "stack": false, + "steppedLine": false, + "targets": [ + { + "refId": "A", + "target": "aliasByNode(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.spool_is_$dest.operation_is*.stat_is_max_50, 5, 6)" + }, + { + "refId": "B", + "target": "aliasByNode(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.spool_is_$dest.operation_is*.stat_is_max_95, 5, 6)" + }, + { + "refId": "C", + "target": "aliasByNode(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.spool_is_$dest.operation_is*.stat_is_max, 5, 6)" + }, + { + "refId": "D", + "target": "aliasByNode(service_is_carbon-relay-ng.instance_is_$instance.mtype_is_gauge.unit_is_ns.spool_is_$dest.operation_is*.stat_is_mean, 5, 6)" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "spool durations", + "tooltip": { + "msResolution": false, + "shared": false, + "value_type": "cumulative", + "sort": 0 + }, + "type": "graph", + "xaxis": { + "show": true + }, + "yaxes": [ + { + "format": "ns", + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "max": null, + "min": null, + "show": true + } + ] + } + ], + "title": "New row" + } + ], + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "collapse": false, + "enable": true, + "notice": false, + "now": true, + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "status": "Stable", + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "type": "timepicker" + }, + "templating": { + "enable": true, + "list": [ + { + "allFormat": "glob", + "current": {}, + "datasource": "${DS_GRAPHITE}", + "hide": 0, + "includeAll": true, + "name": "instance", 
+ "options": [], + "query": "service_is_carbon-relay-ng.instance_is_*", + "refresh": 1, + "refresh_on_load": true, + "regex": "/instance_is_(.*)/", + "type": "query" + }, + { + "allFormat": "glob", + "current": {}, + "datasource": "${DS_GRAPHITE}", + "hide": 0, + "includeAll": true, + "name": "dest", + "options": [], + "query": "service_is_carbon-relay-ng.instance_is_$instance.mtype_is_counter.dest_is_*", + "refresh": 1, + "refresh_on_load": true, + "regex": "/dest_is_(.*)/", + "type": "query" + } + ] + }, + "annotations": { + "enable": true, + "list": [] + }, + "refresh": false, + "schemaVersion": 12, + "version": 2, + "links": [], + "gnetId": null +} diff --git a/vendor/github.com/hailocab/go-hostpool/.gitignore b/vendor/github.com/hailocab/go-hostpool/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/hailocab/go-hostpool/.travis.yml b/vendor/github.com/hailocab/go-hostpool/.travis.yml new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go index e6fe9a79c7..8627aa5cd2 100644 --- a/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go +++ b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go @@ -17,7 +17,6 @@ func (r *epsilonHostPoolResponse) Mark(err error) { r.ended = time.Now() doMark(err, r) }) - } type epsilonGreedyHostPool struct { @@ -26,6 +25,7 @@ type epsilonGreedyHostPool struct { decayDuration time.Duration EpsilonValueCalculator // embed the epsilonValueCalculator timer + quit chan bool } // Construct an Epsilon Greedy HostPool @@ -54,6 +54,7 @@ func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonV decayDuration: decayDuration, EpsilonValueCalculator: calc, timer: &realTimer{}, + quit: make(chan bool), } // allocate structures @@ -65,6 +66,11 @@ func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonV return p } +func (p *epsilonGreedyHostPool) Close() { + // No need to do p.quit <- true as close(p.quit) does the trick. 
+	close(p.quit)
+}
+
 func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) {
 	p.Lock()
 	defer p.Unlock()
@@ -83,10 +89,15 @@ func (p *epsilonGreedyHostPool) SetHosts(hosts []string) {
 
 func (p *epsilonGreedyHostPool) epsilonGreedyDecay() {
 	durationPerBucket := p.decayDuration / epsilonBuckets
-	ticker := time.Tick(durationPerBucket)
+	ticker := time.NewTicker(durationPerBucket)
 	for {
-		<-ticker
-		p.performEpsilonGreedyDecay()
+		select {
+		case <-p.quit:
+			ticker.Stop()
+			return
+		case <-ticker.C:
+			p.performEpsilonGreedyDecay()
+		}
 	}
 }
 func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() {
@@ -104,6 +115,10 @@ func (p *epsilonGreedyHostPool) Get() HostPoolResponse {
 	p.Lock()
 	defer p.Unlock()
 	host := p.getEpsilonGreedy()
+	if host == "" {
+		return nil
+	}
+
 	started := time.Now()
 	return &epsilonHostPoolResponse{
 		standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p},
@@ -161,6 +176,7 @@ func (p *epsilonGreedyHostPool) getEpsilonGreedy() string {
 	if len(possibleHosts) != 0 {
 		log.Println("Failed to randomly choose a host, Dan loses")
 	}
+
 	return p.getRoundRobin()
 }
diff --git a/vendor/github.com/hailocab/go-hostpool/example_test.go b/vendor/github.com/hailocab/go-hostpool/example_test.go
new file mode 100644
index 0000000000..88d0e558c2
--- /dev/null
+++ b/vendor/github.com/hailocab/go-hostpool/example_test.go
@@ -0,0 +1,10 @@
+package hostpool
+
+func ExampleNewEpsilonGreedy() {
+	hp := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{})
+	hostResponse := hp.Get()
+	hostname := hostResponse.Host()
+	_ = hostname  // make a request against hostname here...
+	var err error // ...and capture its error
+	hostResponse.Mark(err)
+}
diff --git a/vendor/github.com/hailocab/go-hostpool/hostpool.go b/vendor/github.com/hailocab/go-hostpool/hostpool.go
index 93ed1c7f14..702ca9276a 100644
--- a/vendor/github.com/hailocab/go-hostpool/hostpool.go
+++ b/vendor/github.com/hailocab/go-hostpool/hostpool.go
@@ -44,14 +44,22 @@ type HostPool interface {
 	markFailed(HostPoolResponse)
 	ResetAll()
+	// ReturnUnhealthy when called with false will prevent an unhealthy node from
+	// being returned and will instead return a nil HostPoolResponse. If you use
+	// this feature, you should check the result of Get for nil
+	ReturnUnhealthy(v bool)
 	Hosts() []string
 	SetHosts([]string)
+
+	// Close the hostpool and release all resources.
+	Close()
 }
 
 type standardHostPool struct {
 	sync.RWMutex
 	hosts map[string]*hostEntry
 	hostList []*hostEntry
+	returnUnhealthy bool
 	initialRetryDelay time.Duration
 	maxRetryInterval time.Duration
 	nextHostIndex int
@@ -68,6 +76,7 @@ const defaultDecayDuration = time.Duration(5) * time.Minute
 // Construct a basic HostPool using the hostnames provided
 func New(hosts []string) HostPool {
 	p := &standardHostPool{
+		returnUnhealthy: true,
 		hosts: make(map[string]*hostEntry, len(hosts)),
 		hostList: make([]*hostEntry, len(hosts)),
 		initialRetryDelay: time.Duration(30) * time.Second,
@@ -113,6 +122,10 @@ func (p *standardHostPool) Get() HostPoolResponse {
 	p.Lock()
 	defer p.Unlock()
 	host := p.getRoundRobin()
+	if host == "" {
+		return nil
+	}
+
 	return &standardHostPoolResponse{host: host, pool: p}
 }
 
@@ -135,6 +148,11 @@ func (p *standardHostPool) getRoundRobin() string {
 		}
 	}
 
+	// if all hosts are down and returnUnhealthy is false, return no host
+	if !p.returnUnhealthy {
+		return ""
+	}
+
+	// all hosts are down.
re-add them p.doResetAll() p.nextHostIndex = 0 @@ -153,6 +171,12 @@ func (p *standardHostPool) SetHosts(hosts []string) { p.setHosts(hosts) } +func (p *standardHostPool) ReturnUnhealthy(v bool) { + p.Lock() + defer p.Unlock() + p.returnUnhealthy = v +} + func (p *standardHostPool) setHosts(hosts []string) { p.hosts = make(map[string]*hostEntry, len(hosts)) p.hostList = make([]*hostEntry, len(hosts)) @@ -176,6 +200,12 @@ func (p *standardHostPool) doResetAll() { } } +func (p *standardHostPool) Close() { + for _, h := range p.hosts { + h.dead = true + } +} + func (p *standardHostPool) markSuccess(hostR HostPoolResponse) { host := hostR.Host() p.Lock() @@ -205,7 +235,7 @@ func (p *standardHostPool) markFailed(hostR HostPoolResponse) { } func (p *standardHostPool) Hosts() []string { - hosts := make([]string, len(p.hosts)) + hosts := make([]string, 0, len(p.hosts)) for host := range p.hosts { hosts = append(hosts, host) } diff --git a/vendor/github.com/hailocab/go-hostpool/hostpool_test.go b/vendor/github.com/hailocab/go-hostpool/hostpool_test.go new file mode 100644 index 0000000000..e974aa74c5 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/hostpool_test.go @@ -0,0 +1,145 @@ +package hostpool + +import ( + "errors" + "github.com/bmizerany/assert" + "io/ioutil" + "log" + "math/rand" + "os" + "testing" + "time" +) + +func TestHostPool(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stdout) + + dummyErr := errors.New("Dummy Error") + + p := New([]string{"a", "b", "c"}) + assert.Equal(t, p.Get().Host(), "a") + assert.Equal(t, p.Get().Host(), "b") + assert.Equal(t, p.Get().Host(), "c") + respA := p.Get() + assert.Equal(t, respA.Host(), "a") + + respA.Mark(dummyErr) + respB := p.Get() + respB.Mark(dummyErr) + respC := p.Get() + assert.Equal(t, respC.Host(), "c") + respC.Mark(nil) + // get again, and verify that it's still c + assert.Equal(t, p.Get().Host(), "c") + // now try to mark b as success; should fail because already marked + respB.Mark(nil) + assert.Equal(t, p.Get().Host(), "c") // would be b if it were not dead + // now restore a + respA = &standardHostPoolResponse{host: "a", pool: p} + respA.Mark(nil) + assert.Equal(t, p.Get().Host(), "a") + assert.Equal(t, p.Get().Host(), "c") + + // ensure that we get *something* back when all hosts fail + for _, host := range []string{"a", "b", "c"} { + response := &standardHostPoolResponse{host: host, pool: p} + response.Mark(dummyErr) + } + resp := p.Get() + assert.NotEqual(t, resp, nil) +} + +type mockTimer struct { + t int // the time it will always return +} + +func (t *mockTimer) between(start time.Time, end time.Time) time.Duration { + return time.Duration(t.t) * time.Millisecond +} + +func TestEpsilonGreedy(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stdout) + + rand.Seed(10) + + iterations := 12000 + p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) + + timings := make(map[string]int64) + timings["a"] = 200 + timings["b"] = 300 + + hitCounts := make(map[string]int) + hitCounts["a"] = 0 + hitCounts["b"] = 0 + + log.Printf("starting first run (a, b)") + + for i := 0; i < iterations; i += 1 { + if i != 0 && i%100 == 0 { + p.performEpsilonGreedyDecay() + } + hostR := p.Get() + host := hostR.Host() + hitCounts[host]++ + timing := timings[host] + p.timer = &mockTimer{t: int(timing)} + hostR.Mark(nil) + } + + for host := range hitCounts { + log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], 
(float64(hitCounts[host])/float64(iterations))*100.0) + } + + assert.Equal(t, hitCounts["a"] > hitCounts["b"], true) + + hitCounts["a"] = 0 + hitCounts["b"] = 0 + log.Printf("starting second run (b, a)") + timings["a"] = 500 + timings["b"] = 100 + + for i := 0; i < iterations; i += 1 { + if i != 0 && i%100 == 0 { + p.performEpsilonGreedyDecay() + } + hostR := p.Get() + host := hostR.Host() + hitCounts[host]++ + timing := timings[host] + p.timer = &mockTimer{t: int(timing)} + hostR.Mark(nil) + } + + for host := range hitCounts { + log.Printf("host %s hit %d times (%0.2f percent)", host, hitCounts[host], (float64(hitCounts[host])/float64(iterations))*100.0) + } + + assert.Equal(t, hitCounts["b"] > hitCounts["a"], true) +} + +func BenchmarkEpsilonGreedy(b *testing.B) { + b.StopTimer() + + // Make up some response times + zipfDist := rand.NewZipf(rand.New(rand.NewSource(0)), 1.1, 5, 5000) + timings := make([]uint64, b.N) + for i := 0; i < b.N; i++ { + timings[i] = zipfDist.Uint64() + } + + // Make the hostpool with a few hosts + p := NewEpsilonGreedy([]string{"a", "b"}, 0, &LinearEpsilonValueCalculator{}).(*epsilonGreedyHostPool) + + b.StartTimer() + for i := 0; i < b.N; i++ { + if i != 0 && i%100 == 0 { + p.performEpsilonGreedyDecay() + } + hostR := p.Get() + p.timer = &mockTimer{t: int(timings[i])} + hostR.Mark(nil) + } +} diff --git a/vendor/github.com/hashicorp/errwrap/errwrap_test.go b/vendor/github.com/hashicorp/errwrap/errwrap_test.go new file mode 100644 index 0000000000..5ae5f8e3cd --- /dev/null +++ b/vendor/github.com/hashicorp/errwrap/errwrap_test.go @@ -0,0 +1,94 @@ +package errwrap + +import ( + "fmt" + "testing" +) + +func TestWrappedError_impl(t *testing.T) { + var _ error = new(wrappedError) +} + +func TestGetAll(t *testing.T) { + cases := []struct { + Err error + Msg string + Len int + }{ + {}, + { + fmt.Errorf("foo"), + "foo", + 1, + }, + { + fmt.Errorf("bar"), + "foo", + 0, + }, + { + Wrapf("bar", fmt.Errorf("foo")), + "foo", + 1, + }, + { + Wrapf("{{err}}", fmt.Errorf("foo")), + "foo", + 2, + }, + { + Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))), + "foo", + 1, + }, + } + + for i, tc := range cases { + actual := GetAll(tc.Err, tc.Msg) + if len(actual) != tc.Len { + t.Fatalf("%d: bad: %#v", i, actual) + } + for _, v := range actual { + if v.Error() != tc.Msg { + t.Fatalf("%d: bad: %#v", i, actual) + } + } + } +} + +func TestGetAllType(t *testing.T) { + cases := []struct { + Err error + Type interface{} + Len int + }{ + {}, + { + fmt.Errorf("foo"), + "foo", + 0, + }, + { + fmt.Errorf("bar"), + fmt.Errorf("foo"), + 1, + }, + { + Wrapf("bar", fmt.Errorf("foo")), + fmt.Errorf("baz"), + 2, + }, + { + Wrapf("bar", Wrapf("baz", fmt.Errorf("foo"))), + Wrapf("", nil), + 0, + }, + } + + for i, tc := range cases { + actual := GetAllType(tc.Err, tc.Type) + if len(actual) != tc.Len { + t.Fatalf("%d: bad: %#v", i, actual) + } + } +} diff --git a/vendor/github.com/hashicorp/go-msgpack/README.md b/vendor/github.com/hashicorp/go-msgpack/README.md new file mode 100644 index 0000000000..0d9d754d0b --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/README.md @@ -0,0 +1,14 @@ +# go + +Collection of Open-Source Go libraries and tools. + +## Codec + +[Codec](https://github.com/ugorji/go/tree/master/codec#readme) is a High Performance and Feature-Rich Idiomatic encode/decode and rpc library for [msgpack](http://msgpack.org) and [Binc](https://github.com/ugorji/binc). + +Online documentation is at [http://godoc.org/github.com/ugorji/go/codec]. 
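+
+As a minimal, illustrative sketch (not taken from the upstream docs; with this vendored fork you would import github.com/hashicorp/go-msgpack/codec instead), a msgpack round trip looks like:
+
+    package main
+
+    import (
+        "fmt"
+
+        "github.com/ugorji/go/codec"
+    )
+
+    func main() {
+        var (
+            b  []byte
+            mh codec.MsgpackHandle // the zero value is a usable handle
+        )
+        v := map[string]int{"answer": 42}
+        if err := codec.NewEncoderBytes(&b, &mh).Encode(v); err != nil {
+            panic(err)
+        }
+        var out map[string]int
+        if err := codec.NewDecoderBytes(b, &mh).Decode(&out); err != nil {
+            panic(err)
+        }
+        fmt.Println(out["answer"]) // prints 42
+    }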
+ +Install using: + + go get github.com/ugorji/go/codec + diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/bench_test.go b/vendor/github.com/hashicorp/go-msgpack/codec/bench_test.go new file mode 100644 index 0000000000..4d437035e0 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/bench_test.go @@ -0,0 +1,319 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "flag" + "fmt" + "reflect" + "runtime" + "testing" + "time" +) + +// Sample way to run: +// go test -bi -bv -bd=1 -benchmem -bench=. + +var ( + _ = fmt.Printf + benchTs *TestStruc + + approxSize int + + benchDoInitBench bool + benchVerify bool + benchUnscientificRes bool = false + //depth of 0 maps to ~400bytes json-encoded string, 1 maps to ~1400 bytes, etc + //For depth>1, we likely trigger stack growth for encoders, making benchmarking unreliable. + benchDepth int + benchInitDebug bool + benchCheckers []benchChecker +) + +type benchEncFn func(interface{}) ([]byte, error) +type benchDecFn func([]byte, interface{}) error +type benchIntfFn func() interface{} + +type benchChecker struct { + name string + encodefn benchEncFn + decodefn benchDecFn +} + +func benchInitFlags() { + flag.BoolVar(&benchInitDebug, "bg", false, "Bench Debug") + flag.IntVar(&benchDepth, "bd", 1, "Bench Depth: If >1, potential unreliable results due to stack growth") + flag.BoolVar(&benchDoInitBench, "bi", false, "Run Bench Init") + flag.BoolVar(&benchVerify, "bv", false, "Verify Decoded Value during Benchmark") + flag.BoolVar(&benchUnscientificRes, "bu", false, "Show Unscientific Results during Benchmark") +} + +func benchInit() { + benchTs = newTestStruc(benchDepth, true) + approxSize = approxDataSize(reflect.ValueOf(benchTs)) + bytesLen := 1024 * 4 * (benchDepth + 1) * (benchDepth + 1) + if bytesLen < approxSize { + bytesLen = approxSize + } + + benchCheckers = append(benchCheckers, + benchChecker{"msgpack", fnMsgpackEncodeFn, fnMsgpackDecodeFn}, + benchChecker{"binc-nosym", fnBincNoSymEncodeFn, fnBincNoSymDecodeFn}, + benchChecker{"binc-sym", fnBincSymEncodeFn, fnBincSymDecodeFn}, + benchChecker{"simple", fnSimpleEncodeFn, fnSimpleDecodeFn}, + benchChecker{"gob", fnGobEncodeFn, fnGobDecodeFn}, + benchChecker{"json", fnJsonEncodeFn, fnJsonDecodeFn}, + ) + if benchDoInitBench { + runBenchInit() + } +} + +func runBenchInit() { + logT(nil, "..............................................") + logT(nil, "BENCHMARK INIT: %v", time.Now()) + logT(nil, "To run full benchmark comparing encodings (MsgPack, Binc, Simple, JSON, GOB, etc), "+ + "use: \"go test -bench=.\"") + logT(nil, "Benchmark: ") + logT(nil, "\tStruct recursive Depth: %d", benchDepth) + if approxSize > 0 { + logT(nil, "\tApproxDeepSize Of benchmark Struct: %d bytes", approxSize) + } + if benchUnscientificRes { + logT(nil, "Benchmark One-Pass Run (with Unscientific Encode/Decode times): ") + } else { + logT(nil, "Benchmark One-Pass Run:") + } + for _, bc := range benchCheckers { + doBenchCheck(bc.name, bc.encodefn, bc.decodefn) + } + logT(nil, "..............................................") + if benchInitDebug { + logT(nil, "<<<<====>>>> depth: %v, ts: %#v\n", benchDepth, benchTs) + } +} + +func fnBenchNewTs() interface{} { + return new(TestStruc) +} + +func doBenchCheck(name string, encfn benchEncFn, decfn benchDecFn) { + runtime.GC() + tnow := time.Now() + buf, err := encfn(benchTs) + if err != nil { 
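+		// the error is only logged here; this one-pass check carries on with whatever was encoded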
+ logT(nil, "\t%10s: **** Error encoding benchTs: %v", name, err) + } + encDur := time.Now().Sub(tnow) + encLen := len(buf) + runtime.GC() + if !benchUnscientificRes { + logT(nil, "\t%10s: len: %d bytes\n", name, encLen) + return + } + tnow = time.Now() + if err = decfn(buf, new(TestStruc)); err != nil { + logT(nil, "\t%10s: **** Error decoding into new TestStruc: %v", name, err) + } + decDur := time.Now().Sub(tnow) + logT(nil, "\t%10s: len: %d bytes, encode: %v, decode: %v\n", name, encLen, encDur, decDur) +} + +func fnBenchmarkEncode(b *testing.B, encName string, ts interface{}, encfn benchEncFn) { + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := encfn(ts) + if err != nil { + logT(b, "Error encoding benchTs: %s: %v", encName, err) + b.FailNow() + } + } +} + +func fnBenchmarkDecode(b *testing.B, encName string, ts interface{}, + encfn benchEncFn, decfn benchDecFn, newfn benchIntfFn, +) { + buf, err := encfn(ts) + if err != nil { + logT(b, "Error encoding benchTs: %s: %v", encName, err) + b.FailNow() + } + runtime.GC() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ts = newfn() + if err = decfn(buf, ts); err != nil { + logT(b, "Error decoding into new TestStruc: %s: %v", encName, err) + b.FailNow() + } + if benchVerify { + if vts, vok := ts.(*TestStruc); vok { + verifyTsTree(b, vts) + } + } + } +} + +func verifyTsTree(b *testing.B, ts *TestStruc) { + var ts0, ts1m, ts2m, ts1s, ts2s *TestStruc + ts0 = ts + + if benchDepth > 0 { + ts1m, ts1s = verifyCheckAndGet(b, ts0) + } + + if benchDepth > 1 { + ts2m, ts2s = verifyCheckAndGet(b, ts1m) + } + for _, tsx := range []*TestStruc{ts0, ts1m, ts2m, ts1s, ts2s} { + if tsx != nil { + verifyOneOne(b, tsx) + } + } +} + +func verifyCheckAndGet(b *testing.B, ts0 *TestStruc) (ts1m *TestStruc, ts1s *TestStruc) { + // if len(ts1m.Ms) <= 2 { + // logT(b, "Error: ts1m.Ms len should be > 2. Got: %v", len(ts1m.Ms)) + // b.FailNow() + // } + if len(ts0.Its) == 0 { + logT(b, "Error: ts0.Islice len should be > 0. 
Got: %v", len(ts0.Its)) + b.FailNow() + } + ts1m = ts0.Mtsptr["0"] + ts1s = ts0.Its[0] + if ts1m == nil || ts1s == nil { + logT(b, "Error: At benchDepth 1, No *TestStruc found") + b.FailNow() + } + return +} + +func verifyOneOne(b *testing.B, ts *TestStruc) { + if ts.I64slice[2] != int64(3) { + logT(b, "Error: Decode failed by checking values") + b.FailNow() + } +} + +func fnMsgpackEncodeFn(ts interface{}) (bs []byte, err error) { + err = NewEncoderBytes(&bs, testMsgpackH).Encode(ts) + return +} + +func fnMsgpackDecodeFn(buf []byte, ts interface{}) error { + return NewDecoderBytes(buf, testMsgpackH).Decode(ts) +} + +func fnBincEncodeFn(ts interface{}, sym AsSymbolFlag) (bs []byte, err error) { + tSym := testBincH.AsSymbols + testBincH.AsSymbols = sym + err = NewEncoderBytes(&bs, testBincH).Encode(ts) + testBincH.AsSymbols = tSym + return +} + +func fnBincDecodeFn(buf []byte, ts interface{}, sym AsSymbolFlag) (err error) { + tSym := testBincH.AsSymbols + testBincH.AsSymbols = sym + err = NewDecoderBytes(buf, testBincH).Decode(ts) + testBincH.AsSymbols = tSym + return +} + +func fnBincNoSymEncodeFn(ts interface{}) (bs []byte, err error) { + return fnBincEncodeFn(ts, AsSymbolNone) +} + +func fnBincNoSymDecodeFn(buf []byte, ts interface{}) error { + return fnBincDecodeFn(buf, ts, AsSymbolNone) +} + +func fnBincSymEncodeFn(ts interface{}) (bs []byte, err error) { + return fnBincEncodeFn(ts, AsSymbolAll) +} + +func fnBincSymDecodeFn(buf []byte, ts interface{}) error { + return fnBincDecodeFn(buf, ts, AsSymbolAll) +} + +func fnSimpleEncodeFn(ts interface{}) (bs []byte, err error) { + err = NewEncoderBytes(&bs, testSimpleH).Encode(ts) + return +} + +func fnSimpleDecodeFn(buf []byte, ts interface{}) error { + return NewDecoderBytes(buf, testSimpleH).Decode(ts) +} + +func fnGobEncodeFn(ts interface{}) ([]byte, error) { + bbuf := new(bytes.Buffer) + err := gob.NewEncoder(bbuf).Encode(ts) + return bbuf.Bytes(), err +} + +func fnGobDecodeFn(buf []byte, ts interface{}) error { + return gob.NewDecoder(bytes.NewBuffer(buf)).Decode(ts) +} + +func fnJsonEncodeFn(ts interface{}) ([]byte, error) { + return json.Marshal(ts) +} + +func fnJsonDecodeFn(buf []byte, ts interface{}) error { + return json.Unmarshal(buf, ts) +} + +func Benchmark__Msgpack____Encode(b *testing.B) { + fnBenchmarkEncode(b, "msgpack", benchTs, fnMsgpackEncodeFn) +} + +func Benchmark__Msgpack____Decode(b *testing.B) { + fnBenchmarkDecode(b, "msgpack", benchTs, fnMsgpackEncodeFn, fnMsgpackDecodeFn, fnBenchNewTs) +} + +func Benchmark__Binc_NoSym_Encode(b *testing.B) { + fnBenchmarkEncode(b, "binc", benchTs, fnBincNoSymEncodeFn) +} + +func Benchmark__Binc_NoSym_Decode(b *testing.B) { + fnBenchmarkDecode(b, "binc", benchTs, fnBincNoSymEncodeFn, fnBincNoSymDecodeFn, fnBenchNewTs) +} + +func Benchmark__Binc_Sym___Encode(b *testing.B) { + fnBenchmarkEncode(b, "binc", benchTs, fnBincSymEncodeFn) +} + +func Benchmark__Binc_Sym___Decode(b *testing.B) { + fnBenchmarkDecode(b, "binc", benchTs, fnBincSymEncodeFn, fnBincSymDecodeFn, fnBenchNewTs) +} + +func Benchmark__Simple____Encode(b *testing.B) { + fnBenchmarkEncode(b, "simple", benchTs, fnSimpleEncodeFn) +} + +func Benchmark__Simple____Decode(b *testing.B) { + fnBenchmarkDecode(b, "simple", benchTs, fnSimpleEncodeFn, fnSimpleDecodeFn, fnBenchNewTs) +} + +func Benchmark__Gob________Encode(b *testing.B) { + fnBenchmarkEncode(b, "gob", benchTs, fnGobEncodeFn) +} + +func Benchmark__Gob________Decode(b *testing.B) { + fnBenchmarkDecode(b, "gob", benchTs, fnGobEncodeFn, fnGobDecodeFn, fnBenchNewTs) +} + 
+func Benchmark__Json_______Encode(b *testing.B) { + fnBenchmarkEncode(b, "json", benchTs, fnJsonEncodeFn) +} + +func Benchmark__Json_______Decode(b *testing.B) { + fnBenchmarkDecode(b, "json", benchTs, fnJsonEncodeFn, fnJsonDecodeFn, fnBenchNewTs) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/codecs_test.go b/vendor/github.com/hashicorp/go-msgpack/codec/codecs_test.go new file mode 100644 index 0000000000..cb184491f1 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/codecs_test.go @@ -0,0 +1,1002 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// Test works by using a slice of interfaces. +// It can test for encoding/decoding into/from a nil interface{} +// or passing the object to encode/decode into. +// +// There are basically 2 main tests here. +// First test internally encodes and decodes things and verifies that +// the artifact was as expected. +// Second test will use python msgpack to create a bunch of golden files, +// read those files, and compare them to what it should be. It then +// writes those files back out and compares the byte streams. +// +// Taken together, the tests are pretty extensive. + +import ( + "bytes" + "encoding/gob" + "flag" + "fmt" + "io/ioutil" + "math" + "net" + "net/rpc" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "strconv" + "sync/atomic" + "testing" + "time" +) + +type testVerifyArg int + +const ( + testVerifyMapTypeSame testVerifyArg = iota + testVerifyMapTypeStrIntf + testVerifyMapTypeIntfIntf + // testVerifySliceIntf + testVerifyForPython +) + +var ( + testInitDebug bool + testUseIoEncDec bool + testStructToArray bool + testWriteNoSymbols bool + + _ = fmt.Printf + skipVerifyVal interface{} = &(struct{}{}) + + // For Go Time, do not use a descriptive timezone. + // It's unnecessary, and makes it harder to do a reflect.DeepEqual. + // The Offset already tells what the offset should be, if not on UTC and unknown zone name. 
+ timeLoc = time.FixedZone("", -8*60*60) // UTC-08:00 //time.UTC-8 + timeToCompare1 = time.Date(2012, 2, 2, 2, 2, 2, 2000, timeLoc) + timeToCompare2 = time.Date(1900, 2, 2, 2, 2, 2, 2000, timeLoc) + timeToCompare3 = time.Unix(0, 0).UTC() + timeToCompare4 = time.Time{}.UTC() + + table []interface{} // main items we encode + tableVerify []interface{} // we verify encoded things against this after decode + tableTestNilVerify []interface{} // for nil interface, use this to verify (rules are different) + tablePythonVerify []interface{} // for verifying for python, since Python sometimes + // will encode a float32 as float64, or large int as uint + testRpcInt = new(TestRpcInt) + testMsgpackH = &MsgpackHandle{} + testBincH = &BincHandle{} + testSimpleH = &SimpleHandle{} +) + +func testInitFlags() { + // delete(testDecOpts.ExtFuncs, timeTyp) + flag.BoolVar(&testInitDebug, "tg", false, "Test Debug") + flag.BoolVar(&testUseIoEncDec, "ti", false, "Use IO Reader/Writer for Marshal/Unmarshal") + flag.BoolVar(&testStructToArray, "ts", false, "Set StructToArray option") + flag.BoolVar(&testWriteNoSymbols, "tn", false, "Set NoSymbols option") +} + +type AnonInTestStruc struct { + AS string + AI64 int64 + AI16 int16 + AUi64 uint64 + ASslice []string + AI64slice []int64 +} + +type TestStruc struct { + S string + I64 int64 + I16 int16 + Ui64 uint64 + Ui8 uint8 + B bool + By byte + + Sslice []string + I64slice []int64 + I16slice []int16 + Ui64slice []uint64 + Ui8slice []uint8 + Bslice []bool + Byslice []byte + + Islice []interface{} + Iptrslice []*int64 + + AnonInTestStruc + + //M map[interface{}]interface{} `json:"-",bson:"-"` + Ms map[string]interface{} + Msi64 map[string]int64 + + Nintf interface{} //don't set this, so we can test for nil + T time.Time + Nmap map[string]bool //don't set this, so we can test for nil + Nslice []byte //don't set this, so we can test for nil + Nint64 *int64 //don't set this, so we can test for nil + Mtsptr map[string]*TestStruc + Mts map[string]TestStruc + Its []*TestStruc + Nteststruc *TestStruc +} + +type TestABC struct { + A, B, C string +} + +type TestRpcInt struct { + i int +} + +func (r *TestRpcInt) Update(n int, res *int) error { r.i = n; *res = r.i; return nil } +func (r *TestRpcInt) Square(ignore int, res *int) error { *res = r.i * r.i; return nil } +func (r *TestRpcInt) Mult(n int, res *int) error { *res = r.i * n; return nil } +func (r *TestRpcInt) EchoStruct(arg TestABC, res *string) error { + *res = fmt.Sprintf("%#v", arg) + return nil +} +func (r *TestRpcInt) Echo123(args []string, res *string) error { + *res = fmt.Sprintf("%#v", args) + return nil +} + +func testVerifyVal(v interface{}, arg testVerifyArg) (v2 interface{}) { + //for python msgpack, + // - all positive integers are unsigned 64-bit ints + // - all floats are float64 + switch iv := v.(type) { + case int8: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int16: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int32: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case int64: + if iv > 0 { + v2 = uint64(iv) + } else { + v2 = int64(iv) + } + case uint8: + v2 = uint64(iv) + case uint16: + v2 = uint64(iv) + case uint32: + v2 = uint64(iv) + case uint64: + v2 = uint64(iv) + case float32: + v2 = float64(iv) + case float64: + v2 = float64(iv) + case []interface{}: + m2 := make([]interface{}, len(iv)) + for j, vj := range iv { + m2[j] = testVerifyVal(vj, arg) + } + v2 = m2 + case map[string]bool: + switch arg { + case testVerifyMapTypeSame: + m2 := 
make(map[string]bool) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + case testVerifyMapTypeStrIntf, testVerifyForPython: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + case testVerifyMapTypeIntfIntf: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[kj] = kv + } + v2 = m2 + } + case map[string]interface{}: + switch arg { + case testVerifyMapTypeSame: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + case testVerifyMapTypeStrIntf, testVerifyForPython: + m2 := make(map[string]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + case testVerifyMapTypeIntfIntf: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[kj] = testVerifyVal(kv, arg) + } + v2 = m2 + } + case map[interface{}]interface{}: + m2 := make(map[interface{}]interface{}) + for kj, kv := range iv { + m2[testVerifyVal(kj, arg)] = testVerifyVal(kv, arg) + } + v2 = m2 + case time.Time: + switch arg { + case testVerifyForPython: + if iv2 := iv.UnixNano(); iv2 > 0 { + v2 = uint64(iv2) + } else { + v2 = int64(iv2) + } + default: + v2 = v + } + default: + v2 = v + } + return +} + +func testInit() { + gob.Register(new(TestStruc)) + if testInitDebug { + ts0 := newTestStruc(2, false) + fmt.Printf("====> depth: %v, ts: %#v\n", 2, ts0) + } + + testBincH.StructToArray = testStructToArray + if testWriteNoSymbols { + testBincH.AsSymbols = AsSymbolNone + } else { + testBincH.AsSymbols = AsSymbolAll + } + testMsgpackH.StructToArray = testStructToArray + testMsgpackH.RawToString = true + // testMsgpackH.AddExt(byteSliceTyp, 0, testMsgpackH.BinaryEncodeExt, testMsgpackH.BinaryDecodeExt) + // testMsgpackH.AddExt(timeTyp, 1, testMsgpackH.TimeEncodeExt, testMsgpackH.TimeDecodeExt) + timeEncExt := func(rv reflect.Value) ([]byte, error) { + return encodeTime(rv.Interface().(time.Time)), nil + } + timeDecExt := func(rv reflect.Value, bs []byte) error { + tt, err := decodeTime(bs) + if err == nil { + rv.Set(reflect.ValueOf(tt)) + } + return err + } + + // add extensions for msgpack, simple for time.Time, so we can encode/decode same way. + testMsgpackH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) + testSimpleH.AddExt(timeTyp, 1, timeEncExt, timeDecExt) + + primitives := []interface{}{ + int8(-8), + int16(-1616), + int32(-32323232), + int64(-6464646464646464), + uint8(192), + uint16(1616), + uint32(32323232), + uint64(6464646464646464), + byte(192), + float32(-3232.0), + float64(-6464646464.0), + float32(3232.0), + float64(6464646464.0), + false, + true, + nil, + "someday", + "", + "bytestring", + timeToCompare1, + timeToCompare2, + timeToCompare3, + timeToCompare4, + } + mapsAndStrucs := []interface{}{ + map[string]bool{ + "true": true, + "false": false, + }, + map[string]interface{}{ + "true": "True", + "false": false, + "uint16(1616)": uint16(1616), + }, + //add a complex combo map in here. (map has list which has map) + //note that after the first thing, everything else should be generic. 
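+		// (that is: below the first nesting level, stick to value types that
+		//  a decode into a nil interface{} naturally produces)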
+ map[string]interface{}{ + "list": []interface{}{ + int16(1616), + int32(32323232), + true, + float32(-3232.0), + map[string]interface{}{ + "TRUE": true, + "FALSE": false, + }, + []interface{}{true, false}, + }, + "int32": int32(32323232), + "bool": true, + "LONG STRING": "123456789012345678901234567890123456789012345678901234567890", + "SHORT STRING": "1234567890", + }, + map[interface{}]interface{}{ + true: "true", + uint8(138): false, + "false": uint8(200), + }, + newTestStruc(0, false), + } + + table = []interface{}{} + table = append(table, primitives...) //0-19 are primitives + table = append(table, primitives) //20 is a list of primitives + table = append(table, mapsAndStrucs...) //21-24 are maps. 25 is a *struct + + tableVerify = make([]interface{}, len(table)) + tableTestNilVerify = make([]interface{}, len(table)) + tablePythonVerify = make([]interface{}, len(table)) + + lp := len(primitives) + av := tableVerify + for i, v := range table { + if i == lp+3 { + av[i] = skipVerifyVal + continue + } + //av[i] = testVerifyVal(v, testVerifyMapTypeSame) + switch v.(type) { + case []interface{}: + av[i] = testVerifyVal(v, testVerifyMapTypeSame) + case map[string]interface{}: + av[i] = testVerifyVal(v, testVerifyMapTypeSame) + case map[interface{}]interface{}: + av[i] = testVerifyVal(v, testVerifyMapTypeSame) + default: + av[i] = v + } + } + + av = tableTestNilVerify + for i, v := range table { + if i > lp+3 { + av[i] = skipVerifyVal + continue + } + av[i] = testVerifyVal(v, testVerifyMapTypeStrIntf) + } + + av = tablePythonVerify + for i, v := range table { + if i > lp+3 { + av[i] = skipVerifyVal + continue + } + av[i] = testVerifyVal(v, testVerifyForPython) + } + + tablePythonVerify = tablePythonVerify[:24] +} + +func testUnmarshal(v interface{}, data []byte, h Handle) error { + if testUseIoEncDec { + return NewDecoder(bytes.NewBuffer(data), h).Decode(v) + } + return NewDecoderBytes(data, h).Decode(v) +} + +func testMarshal(v interface{}, h Handle) (bs []byte, err error) { + if testUseIoEncDec { + var buf bytes.Buffer + err = NewEncoder(&buf, h).Encode(v) + bs = buf.Bytes() + return + } + err = NewEncoderBytes(&bs, h).Encode(v) + return +} + +func testMarshalErr(v interface{}, h Handle, t *testing.T, name string) (bs []byte, err error) { + if bs, err = testMarshal(v, h); err != nil { + logT(t, "Error encoding %s: %v, Err: %v", name, v, err) + t.FailNow() + } + return +} + +func testUnmarshalErr(v interface{}, data []byte, h Handle, t *testing.T, name string) (err error) { + if err = testUnmarshal(v, data, h); err != nil { + logT(t, "Error Decoding into %s: %v, Err: %v", name, v, err) + t.FailNow() + } + return +} + +func newTestStruc(depth int, bench bool) (ts *TestStruc) { + var i64a, i64b, i64c, i64d int64 = 64, 6464, 646464, 64646464 + + ts = &TestStruc{ + S: "some string", + I64: math.MaxInt64 * 2 / 3, // 64, + I16: 16, + Ui64: uint64(int64(math.MaxInt64 * 2 / 3)), // 64, //don't use MaxUint64, as bson can't write it + Ui8: 160, + B: true, + By: 5, + + Sslice: []string{"one", "two", "three"}, + I64slice: []int64{1, 2, 3}, + I16slice: []int16{4, 5, 6}, + Ui64slice: []uint64{137, 138, 139}, + Ui8slice: []uint8{210, 211, 212}, + Bslice: []bool{true, false, true, false}, + Byslice: []byte{13, 14, 15}, + + Islice: []interface{}{"true", true, "no", false, uint64(288), float64(0.4)}, + + Ms: map[string]interface{}{ + "true": "true", + "int64(9)": false, + }, + Msi64: map[string]int64{ + "one": 1, + "two": 2, + }, + T: timeToCompare1, + AnonInTestStruc: AnonInTestStruc{ + AS: "A-String", + 
AI64: 64, + AI16: 16, + AUi64: 64, + ASslice: []string{"Aone", "Atwo", "Athree"}, + AI64slice: []int64{1, 2, 3}, + }, + } + //For benchmarks, some things will not work. + if !bench { + //json and bson require string keys in maps + //ts.M = map[interface{}]interface{}{ + // true: "true", + // int8(9): false, + //} + //gob cannot encode nil in element in array (encodeArray: nil element) + ts.Iptrslice = []*int64{nil, &i64a, nil, &i64b, nil, &i64c, nil, &i64d, nil} + // ts.Iptrslice = nil + } + if depth > 0 { + depth-- + if ts.Mtsptr == nil { + ts.Mtsptr = make(map[string]*TestStruc) + } + if ts.Mts == nil { + ts.Mts = make(map[string]TestStruc) + } + ts.Mtsptr["0"] = newTestStruc(depth, bench) + ts.Mts["0"] = *(ts.Mtsptr["0"]) + ts.Its = append(ts.Its, ts.Mtsptr["0"]) + } + return +} + +// doTestCodecTableOne allows us test for different variations based on arguments passed. +func doTestCodecTableOne(t *testing.T, testNil bool, h Handle, + vs []interface{}, vsVerify []interface{}) { + //if testNil, then just test for when a pointer to a nil interface{} is passed. It should work. + //Current setup allows us test (at least manually) the nil interface or typed interface. + logT(t, "================ TestNil: %v ================\n", testNil) + for i, v0 := range vs { + logT(t, "..............................................") + logT(t, " Testing: #%d:, %T, %#v\n", i, v0, v0) + b0, err := testMarshalErr(v0, h, t, "v0") + if err != nil { + continue + } + logT(t, " Encoded bytes: len: %v, %v\n", len(b0), b0) + + var v1 interface{} + + if testNil { + err = testUnmarshal(&v1, b0, h) + } else { + if v0 != nil { + v0rt := reflect.TypeOf(v0) // ptr + rv1 := reflect.New(v0rt) + err = testUnmarshal(rv1.Interface(), b0, h) + v1 = rv1.Elem().Interface() + // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() + } + } + + logT(t, " v1 returned: %T, %#v", v1, v1) + // if v1 != nil { + // logT(t, " v1 returned: %T, %#v", v1, v1) + // //we always indirect, because ptr to typed value may be passed (if not testNil) + // v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() + // } + if err != nil { + logT(t, "-------- Error: %v. 
Partial return: %v", err, v1) + failT(t) + continue + } + v0check := vsVerify[i] + if v0check == skipVerifyVal { + logT(t, " Nil Check skipped: Decoded: %T, %#v\n", v1, v1) + continue + } + + if err = deepEqual(v0check, v1); err == nil { + logT(t, "++++++++ Before and After marshal matched\n") + } else { + logT(t, "-------- Before and After marshal do not match: Error: %v"+ + " ====> GOLDEN: (%T) %#v, DECODED: (%T) %#v\n", err, v0check, v0check, v1, v1) + failT(t) + } + } +} + +func testCodecTableOne(t *testing.T, h Handle) { + // func TestMsgpackAllExperimental(t *testing.T) { + // dopts := testDecOpts(nil, nil, false, true, true), + + switch v := h.(type) { + case *MsgpackHandle: + var oldWriteExt, oldRawToString bool + oldWriteExt, v.WriteExt = v.WriteExt, true + oldRawToString, v.RawToString = v.RawToString, true + doTestCodecTableOne(t, false, h, table, tableVerify) + v.WriteExt, v.RawToString = oldWriteExt, oldRawToString + default: + doTestCodecTableOne(t, false, h, table, tableVerify) + } + // func TestMsgpackAll(t *testing.T) { + idxTime, numPrim, numMap := 19, 23, 4 + + //skip []interface{} containing time.Time + doTestCodecTableOne(t, false, h, table[:numPrim], tableVerify[:numPrim]) + doTestCodecTableOne(t, false, h, table[numPrim+1:], tableVerify[numPrim+1:]) + // func TestMsgpackNilStringMap(t *testing.T) { + var oldMapType reflect.Type + v := h.getBasicHandle() + oldMapType, v.MapType = v.MapType, mapStrIntfTyp + + //skip time.Time, []interface{} containing time.Time, last map, and newStruc + doTestCodecTableOne(t, true, h, table[:idxTime], tableTestNilVerify[:idxTime]) + doTestCodecTableOne(t, true, h, table[numPrim+1:numPrim+numMap], tableTestNilVerify[numPrim+1:numPrim+numMap]) + + v.MapType = oldMapType + + // func TestMsgpackNilIntf(t *testing.T) { + + //do newTestStruc and last element of map + doTestCodecTableOne(t, true, h, table[numPrim+numMap:], tableTestNilVerify[numPrim+numMap:]) + //TODO? What is this one? + //doTestCodecTableOne(t, true, h, table[17:18], tableTestNilVerify[17:18]) +} + +func testCodecMiscOne(t *testing.T, h Handle) { + b, err := testMarshalErr(32, h, t, "32") + // Cannot do this nil one, because faster type assertion decoding will panic + // var i *int32 + // if err = testUnmarshal(b, i, nil); err == nil { + // logT(t, "------- Expecting error because we cannot unmarshal to int32 nil ptr") + // t.FailNow() + // } + var i2 int32 = 0 + err = testUnmarshalErr(&i2, b, h, t, "int32-ptr") + if i2 != int32(32) { + logT(t, "------- didn't unmarshal to 32: Received: %d", i2) + t.FailNow() + } + + // func TestMsgpackDecodePtr(t *testing.T) { + ts := newTestStruc(0, false) + b, err = testMarshalErr(ts, h, t, "pointer-to-struct") + if len(b) < 40 { + logT(t, "------- Size must be > 40. Size: %d", len(b)) + t.FailNow() + } + logT(t, "------- b: %v", b) + ts2 := new(TestStruc) + err = testUnmarshalErr(ts2, b, h, t, "pointer-to-struct") + if ts2.I64 != math.MaxInt64*2/3 { + logT(t, "------- Unmarshal wrong. Expect I64 = 64. 
Got: %v", ts2.I64) + t.FailNow() + } + + // func TestMsgpackIntfDecode(t *testing.T) { + m := map[string]int{"A": 2, "B": 3} + p := []interface{}{m} + bs, err := testMarshalErr(p, h, t, "p") + + m2 := map[string]int{} + p2 := []interface{}{m2} + err = testUnmarshalErr(&p2, bs, h, t, "&p2") + + if m2["A"] != 2 || m2["B"] != 3 { + logT(t, "m2 not as expected: expecting: %v, got: %v", m, m2) + t.FailNow() + } + // log("m: %v, m2: %v, p: %v, p2: %v", m, m2, p, p2) + checkEqualT(t, p, p2, "p=p2") + checkEqualT(t, m, m2, "m=m2") + if err = deepEqual(p, p2); err == nil { + logT(t, "p and p2 match") + } else { + logT(t, "Not Equal: %v. p: %v, p2: %v", err, p, p2) + t.FailNow() + } + if err = deepEqual(m, m2); err == nil { + logT(t, "m and m2 match") + } else { + logT(t, "Not Equal: %v. m: %v, m2: %v", err, m, m2) + t.FailNow() + } + + // func TestMsgpackDecodeStructSubset(t *testing.T) { + // test that we can decode a subset of the stream + mm := map[string]interface{}{"A": 5, "B": 99, "C": 333} + bs, err = testMarshalErr(mm, h, t, "mm") + type ttt struct { + A uint8 + C int32 + } + var t2 ttt + testUnmarshalErr(&t2, bs, h, t, "t2") + t3 := ttt{5, 333} + checkEqualT(t, t2, t3, "t2=t3") + + // println(">>>>>") + // test simple arrays, non-addressable arrays, slices + type tarr struct { + A int64 + B [3]int64 + C []byte + D [3]byte + } + var tarr0 = tarr{1, [3]int64{2, 3, 4}, []byte{4, 5, 6}, [3]byte{7, 8, 9}} + // test both pointer and non-pointer (value) + for _, tarr1 := range []interface{}{tarr0, &tarr0} { + bs, err = testMarshalErr(tarr1, h, t, "tarr1") + var tarr2 tarr + testUnmarshalErr(&tarr2, bs, h, t, "tarr2") + checkEqualT(t, tarr0, tarr2, "tarr0=tarr2") + // fmt.Printf(">>>> err: %v. tarr1: %v, tarr2: %v\n", err, tarr0, tarr2) + } + + // test byte array, even if empty (msgpack only) + if h == testMsgpackH { + type ystruct struct { + Anarray []byte + } + var ya = ystruct{} + testUnmarshalErr(&ya, []byte{0x91, 0x90}, h, t, "ya") + } +} + +func testCodecEmbeddedPointer(t *testing.T, h Handle) { + type Z int + type A struct { + AnInt int + } + type B struct { + *Z + *A + MoreInt int + } + var z Z = 4 + x1 := &B{&z, &A{5}, 6} + bs, err := testMarshalErr(x1, h, t, "x1") + // fmt.Printf("buf: len(%v): %x\n", buf.Len(), buf.Bytes()) + var x2 = new(B) + err = testUnmarshalErr(x2, bs, h, t, "x2") + err = checkEqualT(t, x1, x2, "x1=x2") + _ = err +} + +func doTestRpcOne(t *testing.T, rr Rpc, h Handle, doRequest bool, exitSleepMs time.Duration, +) (port int) { + // rpc needs EOF, which is sent via a panic, and so must be recovered. + if !recoverPanicToErr { + logT(t, "EXPECTED. 
set recoverPanicToErr=true, since rpc needs EOF") + t.FailNow() + } + srv := rpc.NewServer() + srv.Register(testRpcInt) + ln, err := net.Listen("tcp", "127.0.0.1:0") + // log("listener: %v", ln.Addr()) + checkErrT(t, err) + port = (ln.Addr().(*net.TCPAddr)).Port + // var opts *DecoderOptions + // opts := testDecOpts + // opts.MapType = mapStrIntfTyp + // opts.RawToString = false + serverExitChan := make(chan bool, 1) + var serverExitFlag uint64 = 0 + serverFn := func() { + for { + conn1, err1 := ln.Accept() + // if err1 != nil { + // //fmt.Printf("accept err1: %v\n", err1) + // continue + // } + if atomic.LoadUint64(&serverExitFlag) == 1 { + serverExitChan <- true + conn1.Close() + return // exit serverFn goroutine + } + if err1 == nil { + var sc rpc.ServerCodec = rr.ServerCodec(conn1, h) + srv.ServeCodec(sc) + } + } + } + + clientFn := func(cc rpc.ClientCodec) { + cl := rpc.NewClientWithCodec(cc) + defer cl.Close() + var up, sq, mult int + var rstr string + // log("Calling client") + checkErrT(t, cl.Call("TestRpcInt.Update", 5, &up)) + // log("Called TestRpcInt.Update") + checkEqualT(t, testRpcInt.i, 5, "testRpcInt.i=5") + checkEqualT(t, up, 5, "up=5") + checkErrT(t, cl.Call("TestRpcInt.Square", 1, &sq)) + checkEqualT(t, sq, 25, "sq=25") + checkErrT(t, cl.Call("TestRpcInt.Mult", 20, &mult)) + checkEqualT(t, mult, 100, "mult=100") + checkErrT(t, cl.Call("TestRpcInt.EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) + checkEqualT(t, rstr, fmt.Sprintf("%#v", TestABC{"Aa", "Bb", "Cc"}), "rstr=") + checkErrT(t, cl.Call("TestRpcInt.Echo123", []string{"A1", "B2", "C3"}, &rstr)) + checkEqualT(t, rstr, fmt.Sprintf("%#v", []string{"A1", "B2", "C3"}), "rstr=") + } + + connFn := func() (bs net.Conn) { + // log("calling f1") + bs, err2 := net.Dial(ln.Addr().Network(), ln.Addr().String()) + //fmt.Printf("f1. bs: %v, err2: %v\n", bs, err2) + checkErrT(t, err2) + return + } + + exitFn := func() { + atomic.StoreUint64(&serverExitFlag, 1) + bs := connFn() + <-serverExitChan + bs.Close() + // serverExitChan <- true + } + + go serverFn() + runtime.Gosched() + //time.Sleep(100 * time.Millisecond) + if exitSleepMs == 0 { + defer ln.Close() + defer exitFn() + } + if doRequest { + bs := connFn() + cc := rr.ClientCodec(bs, h) + clientFn(cc) + } + if exitSleepMs != 0 { + go func() { + defer ln.Close() + time.Sleep(exitSleepMs) + exitFn() + }() + } + return +} + +// Comprehensive testing that generates data encoded from python msgpack, +// and validates that our code can read and write it out accordingly. +// We keep this unexported here, and put actual test in ext_dep_test.go. +// This way, it can be excluded by excluding file completely. +func doTestMsgpackPythonGenStreams(t *testing.T) { + logT(t, "TestPythonGenStreams") + tmpdir, err := ioutil.TempDir("", "golang-msgpack-test") + if err != nil { + logT(t, "-------- Unable to create temp directory\n") + t.FailNow() + } + defer os.RemoveAll(tmpdir) + logT(t, "tmpdir: %v", tmpdir) + cmd := exec.Command("python", "msgpack_test.py", "testdata", tmpdir) + //cmd.Stdin = strings.NewReader("some input") + //cmd.Stdout = &out + var cmdout []byte + if cmdout, err = cmd.CombinedOutput(); err != nil { + logT(t, "-------- Error running msgpack_test.py testdata. 
Err: %v", err) + logT(t, " %v", string(cmdout)) + t.FailNow() + } + + oldMapType := testMsgpackH.MapType + for i, v := range tablePythonVerify { + testMsgpackH.MapType = oldMapType + //load up the golden file based on number + //decode it + //compare to in-mem object + //encode it again + //compare to output stream + logT(t, "..............................................") + logT(t, " Testing: #%d: %T, %#v\n", i, v, v) + var bss []byte + bss, err = ioutil.ReadFile(filepath.Join(tmpdir, strconv.Itoa(i)+".golden")) + if err != nil { + logT(t, "-------- Error reading golden file: %d. Err: %v", i, err) + failT(t) + continue + } + testMsgpackH.MapType = mapStrIntfTyp + + var v1 interface{} + if err = testUnmarshal(&v1, bss, testMsgpackH); err != nil { + logT(t, "-------- Error decoding stream: %d: Err: %v", i, err) + failT(t) + continue + } + if v == skipVerifyVal { + continue + } + //no need to indirect, because we pass a nil ptr, so we already have the value + //if v1 != nil { v1 = reflect.Indirect(reflect.ValueOf(v1)).Interface() } + if err = deepEqual(v, v1); err == nil { + logT(t, "++++++++ Objects match") + } else { + logT(t, "-------- Objects do not match: %v. Source: %T. Decoded: %T", err, v, v1) + logT(t, "-------- AGAINST: %#v", v) + logT(t, "-------- DECODED: %#v <====> %#v", v1, reflect.Indirect(reflect.ValueOf(v1)).Interface()) + failT(t) + } + bsb, err := testMarshal(v1, testMsgpackH) + if err != nil { + logT(t, "Error encoding to stream: %d: Err: %v", i, err) + failT(t) + continue + } + if err = deepEqual(bsb, bss); err == nil { + logT(t, "++++++++ Bytes match") + } else { + logT(t, "???????? Bytes do not match. %v.", err) + xs := "--------" + if reflect.ValueOf(v).Kind() == reflect.Map { + xs = " " + logT(t, "%s It's a map. Ok that they don't match (dependent on ordering).", xs) + } else { + logT(t, "%s It's not a map. 
They should match.", xs) + failT(t) + } + logT(t, "%s FROM_FILE: %4d] %v", xs, len(bss), bss) + logT(t, "%s ENCODED: %4d] %v", xs, len(bsb), bsb) + } + } + testMsgpackH.MapType = oldMapType +} + +// To test MsgpackSpecRpc, we test 3 scenarios: +// - Go Client to Go RPC Service (contained within TestMsgpackRpcSpec) +// - Go client to Python RPC Service (contained within doTestMsgpackRpcSpecGoClientToPythonSvc) +// - Python Client to Go RPC Service (contained within doTestMsgpackRpcSpecPythonClientToGoSvc) +// +// This allows us test the different calling conventions +// - Go Service requires only one argument +// - Python Service allows multiple arguments + +func doTestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { + openPort := "6789" + cmd := exec.Command("python", "msgpack_test.py", "rpc-server", openPort, "2") + checkErrT(t, cmd.Start()) + time.Sleep(100 * time.Millisecond) // time for python rpc server to start + bs, err2 := net.Dial("tcp", ":"+openPort) + checkErrT(t, err2) + cc := MsgpackSpecRpc.ClientCodec(bs, testMsgpackH) + cl := rpc.NewClientWithCodec(cc) + defer cl.Close() + var rstr string + checkErrT(t, cl.Call("EchoStruct", TestABC{"Aa", "Bb", "Cc"}, &rstr)) + //checkEqualT(t, rstr, "{'A': 'Aa', 'B': 'Bb', 'C': 'Cc'}") + var mArgs MsgpackSpecRpcMultiArgs = []interface{}{"A1", "B2", "C3"} + checkErrT(t, cl.Call("Echo123", mArgs, &rstr)) + checkEqualT(t, rstr, "1:A1 2:B2 3:C3", "rstr=") +} + +func doTestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { + port := doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, false, 1*time.Second) + //time.Sleep(1000 * time.Millisecond) + cmd := exec.Command("python", "msgpack_test.py", "rpc-client-go-service", strconv.Itoa(port)) + var cmdout []byte + var err error + if cmdout, err = cmd.CombinedOutput(); err != nil { + logT(t, "-------- Error running msgpack_test.py rpc-client-go-service. 
Err: %v", err) + logT(t, " %v", string(cmdout)) + t.FailNow() + } + checkEqualT(t, string(cmdout), + fmt.Sprintf("%#v\n%#v\n", []string{"A1", "B2", "C3"}, TestABC{"Aa", "Bb", "Cc"}), "cmdout=") +} + +func TestBincCodecsTable(t *testing.T) { + testCodecTableOne(t, testBincH) +} + +func TestBincCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testBincH) +} + +func TestBincCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testBincH) +} + +func TestSimpleCodecsTable(t *testing.T) { + testCodecTableOne(t, testSimpleH) +} + +func TestSimpleCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testSimpleH) +} + +func TestSimpleCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testSimpleH) +} + +func TestMsgpackCodecsTable(t *testing.T) { + testCodecTableOne(t, testMsgpackH) +} + +func TestMsgpackCodecsMisc(t *testing.T) { + testCodecMiscOne(t, testMsgpackH) +} + +func TestMsgpackCodecsEmbeddedPointer(t *testing.T) { + testCodecEmbeddedPointer(t, testMsgpackH) +} + +func TestBincRpcGo(t *testing.T) { + doTestRpcOne(t, GoRpc, testBincH, true, 0) +} + +func _TestSimpleRpcGo(t *testing.T) { + doTestRpcOne(t, GoRpc, testSimpleH, true, 0) +} + +func TestMsgpackRpcGo(t *testing.T) { + doTestRpcOne(t, GoRpc, testMsgpackH, true, 0) +} + +func TestMsgpackRpcSpec(t *testing.T) { + doTestRpcOne(t, MsgpackSpecRpc, testMsgpackH, true, 0) +} + +// TODO: +// Add Tests for: +// - decoding empty list/map in stream into a nil slice/map +// - binary(M|Unm)arsher support for time.Time diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go b/vendor/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go new file mode 100644 index 0000000000..bdf448d521 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/ext_dep_test.go @@ -0,0 +1,75 @@ +// //+build ignore + +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// This file includes benchmarks which have dependencies on 3rdparty +// packages (bson and vmihailenco/msgpack) which must be installed locally. +// +// To run the benchmarks including these 3rdparty packages, first +// - Uncomment first line in this file (put // // in front of it) +// - Get those packages: +// go get github.com/vmihailenco/msgpack +// go get labix.org/v2/mgo/bson +// - Run: +// go test -bi -bench=. 
+ +import ( + "testing" + + vmsgpack "gopkg.in/vmihailenco/msgpack.v2" + "labix.org/v2/mgo/bson" +) + +func init() { + benchCheckers = append(benchCheckers, + benchChecker{"v-msgpack", fnVMsgpackEncodeFn, fnVMsgpackDecodeFn}, + benchChecker{"bson", fnBsonEncodeFn, fnBsonDecodeFn}, + ) +} + +func fnVMsgpackEncodeFn(ts interface{}) ([]byte, error) { + return vmsgpack.Marshal(ts) +} + +func fnVMsgpackDecodeFn(buf []byte, ts interface{}) error { + return vmsgpack.Unmarshal(buf, ts) +} + +func fnBsonEncodeFn(ts interface{}) ([]byte, error) { + return bson.Marshal(ts) +} + +func fnBsonDecodeFn(buf []byte, ts interface{}) error { + return bson.Unmarshal(buf, ts) +} + +func Benchmark__Bson_______Encode(b *testing.B) { + fnBenchmarkEncode(b, "bson", benchTs, fnBsonEncodeFn) +} + +func Benchmark__Bson_______Decode(b *testing.B) { + fnBenchmarkDecode(b, "bson", benchTs, fnBsonEncodeFn, fnBsonDecodeFn, fnBenchNewTs) +} + +func Benchmark__VMsgpack___Encode(b *testing.B) { + fnBenchmarkEncode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn) +} + +func Benchmark__VMsgpack___Decode(b *testing.B) { + fnBenchmarkDecode(b, "v-msgpack", benchTs, fnVMsgpackEncodeFn, fnVMsgpackDecodeFn, fnBenchNewTs) +} + +func TestMsgpackPythonGenStreams(t *testing.T) { + doTestMsgpackPythonGenStreams(t) +} + +func TestMsgpackRpcSpecGoClientToPythonSvc(t *testing.T) { + doTestMsgpackRpcSpecGoClientToPythonSvc(t) +} + +func TestMsgpackRpcSpecPythonClientToGoSvc(t *testing.T) { + doTestMsgpackRpcSpecPythonClientToGoSvc(t) +} diff --git a/vendor/github.com/hashicorp/go-msgpack/codec/z_helper_test.go b/vendor/github.com/hashicorp/go-msgpack/codec/z_helper_test.go new file mode 100644 index 0000000000..2e9b3a0f05 --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/codec/z_helper_test.go @@ -0,0 +1,103 @@ +// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved. +// Use of this source code is governed by a BSD-style license found in the LICENSE file. + +package codec + +// All non-std package dependencies related to testing live in this file, +// so porting to different environment is easy (just update functions). +// +// Also, this file is called z_helper_test, to give a "hint" to compiler +// that its init() function should be called last. (not guaranteed by spec) + +import ( + "errors" + "reflect" + "flag" + "testing" +) + +var ( + testLogToT = true + failNowOnFail = true +) + +func init() { + testInitFlags() + benchInitFlags() + flag.Parse() + testInit() + benchInit() +} + +func checkErrT(t *testing.T, err error) { + if err != nil { + logT(t, err.Error()) + failT(t) + } +} + +func checkEqualT(t *testing.T, v1 interface{}, v2 interface{}, desc string) (err error) { + if err = deepEqual(v1, v2); err != nil { + logT(t, "Not Equal: %s: %v. v1: %v, v2: %v", desc, err, v1, v2) + failT(t) + } + return +} + +func logT(x interface{}, format string, args ...interface{}) { + if t, ok := x.(*testing.T); ok && t != nil && testLogToT { + t.Logf(format, args...) + } else if b, ok := x.(*testing.B); ok && b != nil && testLogToT { + b.Logf(format, args...) + } else { + debugf(format, args...) 
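+		// (fallback: x was neither a *testing.T nor a *testing.B,
+		//  so route output through the package-level debug logger)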
+ } +} + +func failT(t *testing.T) { + if failNowOnFail { + t.FailNow() + } else { + t.Fail() + } +} + +func deepEqual(v1, v2 interface{}) (err error) { + if !reflect.DeepEqual(v1, v2) { + err = errors.New("Not Match") + } + return +} + +func approxDataSize(rv reflect.Value) (sum int) { + switch rk := rv.Kind(); rk { + case reflect.Invalid: + case reflect.Ptr, reflect.Interface: + sum += int(rv.Type().Size()) + sum += approxDataSize(rv.Elem()) + case reflect.Slice: + sum += int(rv.Type().Size()) + for j := 0; j < rv.Len(); j++ { + sum += approxDataSize(rv.Index(j)) + } + case reflect.String: + sum += int(rv.Type().Size()) + sum += rv.Len() + case reflect.Map: + sum += int(rv.Type().Size()) + for _, mk := range rv.MapKeys() { + sum += approxDataSize(mk) + sum += approxDataSize(rv.MapIndex(mk)) + } + case reflect.Struct: + //struct size already includes the full data size. + //sum += int(rv.Type().Size()) + for j := 0; j < rv.NumField(); j++ { + sum += approxDataSize(rv.Field(j)) + } + default: + //pure value types + sum += int(rv.Type().Size()) + } + return +} diff --git a/vendor/github.com/hashicorp/go-msgpack/msgpack.org.md b/vendor/github.com/hashicorp/go-msgpack/msgpack.org.md new file mode 100644 index 0000000000..d5ebe71d6f --- /dev/null +++ b/vendor/github.com/hashicorp/go-msgpack/msgpack.org.md @@ -0,0 +1,47 @@ +**MessagePack and [Binc](http://github.com/ugorji/binc) Codec for [Go](http://golang.org) Language.** + +*A High Performance, Feature-Rich, Idiomatic encode/decode and rpc library*. + +To install: + + go get github.com/ugorji/go/codec + +Source: [http://github.com/ugorji/go] +Online documentation: [http://godoc.org/github.com/ugorji/go/codec] + +Typical usage: + +```go + // create and use decoder/encoder + var ( + v interface{} // value to decode/encode into + r io.Reader + w io.Writer + b []byte + mh codec.MsgpackHandle + ) + + dec = codec.NewDecoder(r, &mh) + dec = codec.NewDecoderBytes(b, &mh) + err = dec.Decode(&v) + + enc = codec.NewEncoder(w, &mh) + enc = codec.NewEncoderBytes(&b, &mh) + err = enc.Encode(v) + + //RPC Server + go func() { + for { + conn, err := listener.Accept() + rpcCodec := codec.GoRpc.ServerCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h) + rpc.ServeCodec(rpcCodec) + } + }() + + //RPC Communication (client side) + conn, err = net.Dial("tcp", "localhost:5555") + rpcCodec := codec.GoRpc.ClientCodec(conn, h) + //OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h) + client := rpc.NewClientWithCodec(rpcCodec) +``` diff --git a/vendor/github.com/hashicorp/go-multierror/append_test.go b/vendor/github.com/hashicorp/go-multierror/append_test.go new file mode 100644 index 0000000000..dfa79e289e --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/append_test.go @@ -0,0 +1,64 @@ +package multierror + +import ( + "errors" + "testing" +) + +func TestAppend_Error(t *testing.T) { + original := &Error{ + Errors: []error{errors.New("foo")}, + } + + result := Append(original, errors.New("bar")) + if len(result.Errors) != 2 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } + + original = &Error{} + result = Append(original, errors.New("bar")) + if len(result.Errors) != 1 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } + + // Test when a typed nil is passed + var e *Error + result = Append(e, errors.New("baz")) + if len(result.Errors) != 1 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } + + // Test flattening + original = &Error{ + Errors: []error{errors.New("foo")}, + } + + result = Append(original, Append(nil, 
errors.New("foo"), errors.New("bar"))) + if len(result.Errors) != 3 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } +} + +func TestAppend_NilError(t *testing.T) { + var err error + result := Append(err, errors.New("bar")) + if len(result.Errors) != 1 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } +} + +func TestAppend_NonError(t *testing.T) { + original := errors.New("foo") + result := Append(original, errors.New("bar")) + if len(result.Errors) != 2 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } +} + +func TestAppend_NonError_Error(t *testing.T) { + original := errors.New("foo") + result := Append(original, Append(nil, errors.New("bar"))) + if len(result.Errors) != 2 { + t.Fatalf("wrong len: %d", len(result.Errors)) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/flatten_test.go b/vendor/github.com/hashicorp/go-multierror/flatten_test.go new file mode 100644 index 0000000000..75218f1031 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/flatten_test.go @@ -0,0 +1,48 @@ +package multierror + +import ( + "errors" + "fmt" + "reflect" + "strings" + "testing" +) + +func TestFlatten(t *testing.T) { + original := &Error{ + Errors: []error{ + errors.New("one"), + &Error{ + Errors: []error{ + errors.New("two"), + &Error{ + Errors: []error{ + errors.New("three"), + }, + }, + }, + }, + }, + } + + expected := strings.TrimSpace(` +3 error(s) occurred: + +* one +* two +* three + `) + actual := fmt.Sprintf("%s", Flatten(original)) + + if expected != actual { + t.Fatalf("expected: %s, got: %s", expected, actual) + } +} + +func TestFlatten_nonError(t *testing.T) { + err := errors.New("foo") + actual := Flatten(err) + if !reflect.DeepEqual(actual, err) { + t.Fatalf("bad: %#v", actual) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/format_test.go b/vendor/github.com/hashicorp/go-multierror/format_test.go new file mode 100644 index 0000000000..d7cee5d7d9 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/format_test.go @@ -0,0 +1,23 @@ +package multierror + +import ( + "errors" + "testing" +) + +func TestListFormatFunc(t *testing.T) { + expected := `2 error(s) occurred: + +* foo +* bar` + + errors := []error{ + errors.New("foo"), + errors.New("bar"), + } + + actual := ListFormatFunc(errors) + if actual != expected { + t.Fatalf("bad: %#v", actual) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/multierror_test.go b/vendor/github.com/hashicorp/go-multierror/multierror_test.go new file mode 100644 index 0000000000..3e78079c00 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/multierror_test.go @@ -0,0 +1,70 @@ +package multierror + +import ( + "errors" + "reflect" + "testing" +) + +func TestError_Impl(t *testing.T) { + var _ error = new(Error) +} + +func TestErrorError_custom(t *testing.T) { + errors := []error{ + errors.New("foo"), + errors.New("bar"), + } + + fn := func(es []error) string { + return "foo" + } + + multi := &Error{Errors: errors, ErrorFormat: fn} + if multi.Error() != "foo" { + t.Fatalf("bad: %s", multi.Error()) + } +} + +func TestErrorError_default(t *testing.T) { + expected := `2 error(s) occurred: + +* foo +* bar` + + errors := []error{ + errors.New("foo"), + errors.New("bar"), + } + + multi := &Error{Errors: errors} + if multi.Error() != expected { + t.Fatalf("bad: %s", multi.Error()) + } +} + +func TestErrorErrorOrNil(t *testing.T) { + err := new(Error) + if err.ErrorOrNil() != nil { + t.Fatalf("bad: %#v", err.ErrorOrNil()) + } + + err.Errors = []error{errors.New("foo")} + if v := 
err.ErrorOrNil(); v == nil { + t.Fatal("should not be nil") + } else if !reflect.DeepEqual(v, err) { + t.Fatalf("bad: %#v", v) + } +} + +func TestErrorWrappedErrors(t *testing.T) { + errors := []error{ + errors.New("foo"), + errors.New("bar"), + } + + multi := &Error{Errors: errors} + if !reflect.DeepEqual(multi.Errors, multi.WrappedErrors()) { + t.Fatalf("bad: %s", multi.WrappedErrors()) + } +} diff --git a/vendor/github.com/hashicorp/go-multierror/prefix_test.go b/vendor/github.com/hashicorp/go-multierror/prefix_test.go new file mode 100644 index 0000000000..1d4a6f6d33 --- /dev/null +++ b/vendor/github.com/hashicorp/go-multierror/prefix_test.go @@ -0,0 +1,33 @@ +package multierror + +import ( + "errors" + "testing" +) + +func TestPrefix_Error(t *testing.T) { + original := &Error{ + Errors: []error{errors.New("foo")}, + } + + result := Prefix(original, "bar") + if result.(*Error).Errors[0].Error() != "bar foo" { + t.Fatalf("bad: %s", result) + } +} + +func TestPrefix_NilError(t *testing.T) { + var err error + result := Prefix(err, "bar") + if result != nil { + t.Fatalf("bad: %#v", result) + } +} + +func TestPrefix_NonError(t *testing.T) { + original := errors.New("foo") + result := Prefix(original, "bar") + if result.Error() != "bar foo" { + t.Fatalf("bad: %s", result) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/.gitignore b/vendor/github.com/hashicorp/go-sockaddr/.gitignore new file mode 100644 index 0000000000..41720b86e3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +.cover.out* +coverage.html diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddr_test.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddr_test.go new file mode 100644 index 0000000000..45a0cc7887 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddr_test.go @@ -0,0 +1,594 @@ +package sockaddr_test + +import ( + "fmt" + "net" + "os" + "strings" + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func boolEnvVar(envvar string, emptyDefault bool) bool { + v := os.Getenv(envvar) + switch strings.ToLower(v) { + case "": + return emptyDefault + case "0", "f", "n": + return false + case "1", "t", "y": + return true + default: + fmt.Fprintf(os.Stderr, "Unsupported %s flag %q", envvar, v) + return true + } +} + +// havePrivateIP is a helper function that returns true when we believe we +// should have a private IP address. This changes the failure mode of various +// tests that expect a private IP address. +// +// When you have a private IP assigned to the host, set the environment variable +// SOCKADDR_HAVE_PRIVATE_IP=1 +func havePrivateIP() bool { + return boolEnvVar("SOCKADDR_HAVE_PRIVATE_IP", true) +} + +// havePublicIP is a helper function that returns true when we believe we should +// have a public IP address. This changes the failure mode of various tests +// that expect a public IP address. +// +// When you have a public IP assigned to the host, set the environment variable +// SOCKADDR_HAVE_PUBLIC_IP=1 +func havePublicIP() bool { + return boolEnvVar("SOCKADDR_HAVE_PUBLIC_IP", false) +} + +func TestGetPrivateIP(t *testing.T) { + reportOnPrivate := func(args ...interface{}) { + if havePrivateIP() { + t.Fatalf(args[0].(string), args[1:]...) 
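+			// (fatal rather than skip: havePrivateIP() reported that a
+			//  private IP should be present on this host)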
+ } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + ip, err := sockaddr.GetPrivateIP() + if err != nil { + reportOnPrivate("unable to get a private IP: %v", err) + } + + if ip == "" { + reportOnPrivate("it's hard to test this reliably") + } +} + +func TestGetPrivateIPs(t *testing.T) { + reportOnPrivate := func(args ...interface{}) { + if havePrivateIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + ips, err := sockaddr.GetPrivateIPs() + if err != nil { + reportOnPrivate("unable to get a private IPs: %v", err) + } + + if ips == "" { + reportOnPrivate("it's hard to test this reliably") + } +} + +func TestGetPublicIP(t *testing.T) { + reportOnPublic := func(args ...interface{}) { + if havePublicIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + ip, err := sockaddr.GetPublicIP() + if err != nil { + reportOnPublic("unable to get a public IP: %v", err) + } + + if ip == "" { + reportOnPublic("it's hard to test this reliably") + } +} + +func TestGetPublicIPs(t *testing.T) { + reportOnPublic := func(args ...interface{}) { + if havePublicIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + ips, err := sockaddr.GetPublicIPs() + if err != nil { + reportOnPublic("unable to get a public IPs: %v", err) + } + + if ips == "" { + reportOnPublic("it's hard to test this reliably") + } +} + +func TestGetInterfaceIP(t *testing.T) { + ip, err := sockaddr.GetInterfaceIP(`^.*[\d]$`) + if err != nil { + t.Fatalf("regexp failed: %v", err) + } + + if ip == "" { + t.Skip("it's hard to test this reliably") + } +} + +func TestIfAddrAttr(t *testing.T) { + tests := []struct { + name string + ifAddr sockaddr.IfAddr + attr string + expected string + }{ + { + name: "name", + ifAddr: sockaddr.IfAddr{ + Interface: net.Interface{ + Name: "abc0", + }, + }, + attr: "name", + expected: "abc0", + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + + result, err := sockaddr.IfAttr(test.attr, test.ifAddr) + if err != nil { + t.Errorf("failed to get attr %q from %v", test.name, test.ifAddr) + } + + if result != test.expected { + t.Errorf("unexpected result") + } + } +} + +func TestIfAddrMath(t *testing.T) { + tests := []struct { + name string + ifAddr sockaddr.IfAddr + operation string + value string + expected string + wantFail bool + }{ + { + name: "ipv4 address +2", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "address", + value: "+2", + expected: "127.0.0.3/8", + }, + { + name: "ipv4 address -2", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "address", + value: "-2", + expected: "126.255.255.255/8", + }, + { + name: "ipv4 address + overflow 0xff00ff03", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "address", + value: fmt.Sprintf("+%d", 0xff00ff03), + expected: "126.0.255.4/8", + }, + { + name: "ipv4 address - underflow 0xff00ff04", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "address", + value: fmt.Sprintf("-%d", 0xff00ff04), + expected: "127.255.0.253/8", + }, + { + name: "ipv6 address +2", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "address", + value: "+2", + expected: "::3", + }, + { + name: "ipv6 address -3", + ifAddr: sockaddr.IfAddr{ + 
SockAddr: sockaddr.MustIPv6Addr("::4/128"), + }, + operation: "address", + value: "-3", + expected: "::1", + }, + { + name: "ipv6 address + overflow", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128"), + }, + operation: "address", + value: fmt.Sprintf("+%d", 0x03), + expected: "::2", + }, + { + name: "ipv6 address + underflow", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "address", + value: fmt.Sprintf("-%d", 0x03), + expected: "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe", + }, + { + name: "ipv4 network +2", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "network", + value: "+2", + expected: "127.0.0.2/8", + }, + { + name: "ipv4 network -2", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "network", + value: "-2", + expected: "127.255.255.254/8", + }, + { + // Value exceeds /8 + name: "ipv4 network + overflow 0xff00ff03", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "network", + value: fmt.Sprintf("+%d", 0xff00ff03), + expected: "127.0.255.3/8", + }, + { + // Value exceeds /8 + name: "ipv4 network - underflow+wrap 0xff00ff04", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "network", + value: fmt.Sprintf("-%d", 0xff00ff04), + expected: "127.255.0.252/8", + }, + { + name: "ipv6 network +6", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("fe80::1/64"), + }, + operation: "network", + value: "+6", + expected: "fe80::6/64", + }, + { + name: "ipv6 network -6", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("fe80::ff/64"), + }, + operation: "network", + value: "-6", + expected: "fe80::ffff:ffff:ffff:fffa/64", + }, + { + // Value exceeds /104 mask + name: "ipv6 network + overflow 0xff00ff03", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("fe80::1/104"), + }, + operation: "network", + value: fmt.Sprintf("+%d", 0xff00ff03), + expected: "fe80::ff03/104", + }, + { + // Value exceeds /104 + name: "ipv6 network - underflow+wrap 0xff00ff04", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("fe80::1/104"), + }, + operation: "network", + value: fmt.Sprintf("-%d", 0xff00ff04), + expected: "fe80::ff:fc/104", + }, + { + name: "ipv4 address missing sign", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "address", + value: "123", + wantFail: true, + }, + { + name: "ipv4 network missing sign", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "network", + value: "123", + wantFail: true, + }, + { + name: "ipv6 address missing sign", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "address", + value: "123", + wantFail: true, + }, + { + name: "ipv6 network missing sign", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "network", + value: "123", + wantFail: true, + }, + { + name: "ipv4 address bad value", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "address", + value: "+xyz", + wantFail: true, + }, + { + name: "ipv4 network bad value", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "network", + value: "-xyz", + wantFail: true, + }, + { + name: "ipv6 address bad value", + ifAddr: sockaddr.IfAddr{ + 
SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "address", + value: "+xyz", + wantFail: true, + }, + { + name: "ipv6 network bad value", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "network", + value: "-xyz", + wantFail: true, + }, + { + name: "ipv4 bad operation", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "gooz", + value: "+xyz", + wantFail: true, + }, + { + name: "ipv6 bad operation", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "frabba", + value: "+xyz", + wantFail: true, + }, + { + name: "ipv4 mask operand equals input ipv4 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.20.30.40/8"), + }, + operation: "mask", + value: "8", + expected: "10.0.0.0/8", + }, + { + name: "ipv4 mask operand larger than input ipv4 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("192.168.10.20/24"), + }, + operation: "mask", + value: "16", + expected: "192.168.0.0/16", + }, + { + name: "ipv4 host upper bound mask operand larger than input ipv4 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("192.168.255.255/24"), + }, + operation: "mask", + value: "16", + expected: "192.168.0.0/16", + }, + { + name: "ipv4 mask operand smaller than ipv4 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.20.30.40/8"), + }, + operation: "mask", + value: "16", + expected: "10.20.0.0/8", + }, + { + name: "ipv4 host upper bound mask operand smaller than input ipv4 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.20.255.255/8"), + }, + operation: "mask", + value: "16", + expected: "10.20.0.0/8", + }, + { + name: "ipv4 mask bad value upper bound", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "mask", + value: "33", + wantFail: true, + }, + { + name: "ipv4 mask bad value lower bound", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1/8"), + }, + operation: "mask", + value: "-1", + wantFail: true, + }, + { + name: "ipv6 mask operand equals input ipv6 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("2001:0db8:85a3::8a2e:0370:7334/64"), + }, + operation: "mask", + value: "64", + expected: "2001:db8:85a3::/64", + }, + { + name: "ipv6 mask operand larger than input ipv6 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("2001:0db8:85a3::8a2e:0370:7334/64"), + }, + operation: "mask", + value: "32", + expected: "2001:db8::/32", + }, + { + name: "ipv6 mask operand smaller than input ipv6 subnet mask", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("2001:0db8:85a3::8a2e:0370:7334/64"), + }, + operation: "mask", + value: "96", + expected: "2001:db8:85a3::8a2e:0:0/64", + }, + { + name: "ipv6 mask bad value upper bound", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "mask", + value: "129", + wantFail: true, + }, + { + name: "ipv6 mask bad value lower bound", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + operation: "mask", + value: "-1", + wantFail: true, + }, + { + name: "unix unsupported operation", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustUnixSock("/tmp/bar"), + }, + operation: "address", + value: "+123", + wantFail: true, + }, + { + name: "unix unsupported operation", + ifAddr: sockaddr.IfAddr{ + SockAddr: 
sockaddr.MustUnixSock("/tmp/foo"), + }, + operation: "network", + value: "+123", + wantFail: true, + }, + { + name: "unix unsupported operation", + ifAddr: sockaddr.IfAddr{ + SockAddr: sockaddr.MustUnixSock("/tmp/foo"), + }, + operation: "mask", + value: "8", + wantFail: true, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + + results, err := sockaddr.IfAddrsMath(test.operation, test.value, sockaddr.IfAddrs{test.ifAddr}) + if test.wantFail { + if err != nil { + continue + } else { + t.Fatalf("%s: failed to fail math operation %q with value %q on %v", test.name, test.operation, test.value, test.ifAddr) + } + } else if err != nil { + t.Fatalf("%s: failed to compute math operation %q with value %q on %v", test.name, test.operation, test.value, test.ifAddr) + } + if len(results) != 1 { + t.Fatalf("%s: bad", test.name) + } + + result := results[0] + + switch saType := result.Type(); saType { + case sockaddr.TypeIPv4: + ipv4 := sockaddr.ToIPv4Addr(result.SockAddr) + if ipv4 == nil { + t.Fatalf("bad: %T %+#v", result, result) + } + + if got := ipv4.String(); got != test.expected { + t.Errorf("unexpected result %q: want %q got %q", test.name, test.expected, got) + } + case sockaddr.TypeIPv6: + ipv6 := sockaddr.ToIPv6Addr(result.SockAddr) + if ipv6 == nil { + t.Fatalf("bad: %T %+#v", result, result) + } + + if got := ipv6.String(); got != test.expected { + t.Errorf("unexpected result %q: want %q got %q", test.name, test.expected, got) + } + default: + t.Fatalf("bad") + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifaddrs_test.go b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs_test.go new file mode 100644 index 0000000000..aed8478080 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifaddrs_test.go @@ -0,0 +1,1987 @@ +package sockaddr_test + +import ( + "fmt" + "net" + "reflect" + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +const ( + // NOTE(seanc@): Assume "en0" is the interface with a default route attached + // to it. When this is not the case, change this one constant and tests + // should pass (i.e. "net0"). + ifNameWithDefault = "en0" +) + +// NOTE: A number of these code paths are exercised in template/ and +// cmd/sockaddr/. +// +// TODO(sean@): Add better coverage for filtering functions (e.g. ExcludeBy*, +// IncludeBy*). 
+ +func TestCmpIfAddrFunc(t *testing.T) { + tests := []struct { + name string + t1 sockaddr.IfAddr // must come before t2 according to the ascOp + t2 sockaddr.IfAddr + ascOp sockaddr.CmpIfAddrFunc + ascResult int + descOp sockaddr.CmpIfAddrFunc + descResult int + }{ + { + name: "empty test", + t1: sockaddr.IfAddr{}, + t2: sockaddr.IfAddr{}, + ascOp: sockaddr.AscIfAddress, + descOp: sockaddr.DescIfAddress, + ascResult: 0, + descResult: 0, + }, + { + name: "ipv4 address less", + t1: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + }, + t2: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + ascOp: sockaddr.AscIfAddress, + descOp: sockaddr.DescIfAddress, + ascResult: -1, + descResult: -1, + }, + { + name: "ipv4 private", + t1: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.1.2.3"), + }, + t2: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("203.0.113.3"), + }, + ascOp: sockaddr.AscIfPrivate, + descOp: sockaddr.DescIfPrivate, + ascResult: 0, // not both private, can't complete the test + descResult: 0, + }, + { + name: "IfAddr name", + t1: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.1.2.3"), + Interface: net.Interface{ + Name: "abc0", + }, + }, + t2: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("203.0.113.3"), + Interface: net.Interface{ + Name: "xyz0", + }, + }, + ascOp: sockaddr.AscIfName, + descOp: sockaddr.DescIfName, + ascResult: -1, + descResult: -1, + }, + { + name: "IfAddr network size", + t1: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.0.0.0/8"), + }, + t2: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.0/24"), + }, + ascOp: sockaddr.AscIfNetworkSize, + descOp: sockaddr.DescIfNetworkSize, + ascResult: -1, + descResult: -1, + }, + { + name: "IfAddr port", + t1: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.0.0.0:80"), + }, + t2: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("127.0.0.0:8600"), + }, + ascOp: sockaddr.AscIfPort, + descOp: sockaddr.DescIfPort, + ascResult: -1, + descResult: -1, + }, + { + name: "IfAddr type", + t1: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.0.0.0:80"), + }, + t2: sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("[::1]:80"), + }, + ascOp: sockaddr.AscIfType, + descOp: sockaddr.DescIfType, + ascResult: -1, + descResult: -1, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + + // Test ascending operation + ascExpected := test.ascResult + ascResult := test.ascOp(&test.t1, &test.t2) + if ascResult != ascExpected { + t.Errorf("%s: Unexpected result %d, expected %d when comparing %v and %v using %v", test.name, ascResult, ascExpected, test.t1, test.t2, test.ascOp) + } + + // Test descending operation + descExpected := test.descResult + descResult := test.descOp(&test.t2, &test.t1) + if descResult != descExpected { + t.Errorf("%s: Unexpected result %d, expected %d when comparing %v and %v using %v", test.name, descResult, descExpected, test.t1, test.t2, test.descOp) + } + + if ascResult != descResult { + t.Fatalf("bad") + } + + // Reverse the args + ascExpected = -1 * test.ascResult + ascResult = test.ascOp(&test.t2, &test.t1) + if ascResult != ascExpected { + t.Errorf("%s: Unexpected result %d, expected %d when comparing %v and %v using %v", test.name, ascResult, ascExpected, test.t1, test.t2, test.ascOp) + } + + descExpected = -1 * test.descResult + descResult = test.descOp(&test.t1, &test.t2) + if descResult != descExpected { + t.Errorf("%s: Unexpected result %d, expected %d when 
comparing %v and %v using %v", test.name, descResult, descExpected, test.t1, test.t2, test.descOp) + } + + if ascResult != descResult { + t.Fatalf("bad") + } + + // Test equality + ascExpected = 0 + ascResult = test.ascOp(&test.t1, &test.t1) + if ascResult != ascExpected { + t.Errorf("%s: Unexpected result %d, expected %d when comparing %v and %v using %v", test.name, ascResult, ascExpected, test.t1, test.t2, test.ascOp) + } + + descExpected = 0 + descResult = test.descOp(&test.t1, &test.t1) + if descResult != descExpected { + t.Errorf("%s: Unexpected result %d, expected %d when comparing %v and %v using %v", test.name, descResult, descExpected, test.t1, test.t2, test.descOp) + } + } +} + +func TestFilterIfByFlags(t *testing.T) { + tests := []struct { + name string + selector string + ifAddrs sockaddr.IfAddrs + flags net.Flags + fail bool + }{ + { + name: "broadcast", + selector: "broadcast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagBroadcast, + }, + SockAddr: sockaddr.MustIPv4Addr("1.2.3.1"), + }, + }, + }, + { + name: "down", + selector: "down", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv4Addr("1.2.3.2"), + }, + }, + }, + { + name: "forwardable IPv4", + selector: "forwardable", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + }, + }, + }, + { + name: "forwardable IPv6", + selector: "forwardable", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("cc::1/128"), + }, + }, + }, + { + name: "global unicast", + selector: "global unicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("cc::2"), + }, + }, + }, + { + name: "interface-local multicast", + selector: "interface-local multicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("ff01::2"), + }, + }, + }, + { + name: "link-local multicast", + selector: "link-local multicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("ff02::3"), + }, + }, + }, + { + name: "link-local unicast IPv4", + selector: "link-local unicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv4Addr("169.254.1.101"), + }, + }, + }, + { + name: "link-local unicast IPv6", + selector: "link-local unicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("fe80::3"), + }, + }, + }, + { + name: "loopback ipv4", + selector: "loopback", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagLoopback, + }, + SockAddr: sockaddr.MustIPv4Addr("127.0.0.1"), + }, + }, + }, + { + name: "loopback ipv6", + selector: "loopback", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagLoopback, + }, + SockAddr: sockaddr.MustIPv6Addr("::1"), + }, + }, + }, + { + name: "multicast IPv4", + selector: "multicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagMulticast, + }, + SockAddr: sockaddr.MustIPv4Addr("224.0.0.1"), + }, + }, + }, + { + name: "multicast IPv6", + selector: "multicast", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagMulticast, + }, + 
SockAddr: sockaddr.MustIPv6Addr("ff05::3"), + }, + }, + }, + { + name: "point-to-point", + selector: "point-to-point", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagPointToPoint, + }, + SockAddr: sockaddr.MustIPv6Addr("cc::3"), + }, + }, + }, + { + name: "unspecified", + selector: "unspecified", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("::"), + }, + }, + }, + { + name: "up", + selector: "up", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagUp, + }, + SockAddr: sockaddr.MustIPv6Addr("cc::3"), + }, + }, + }, + { + name: "invalid", + selector: "foo", + fail: true, + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{}, + SockAddr: sockaddr.MustIPv6Addr("cc::3"), + }, + }, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + t.Run(test.name, func(t *testing.T) { + in, out, err := sockaddr.IfByFlag(test.selector, test.ifAddrs) + if test.fail == true && err == nil { + t.Fatalf("%s: expected failure", test.name) + } else if test.fail == true && err != nil { + return + } + + if err != nil && test.fail != true { + t.Fatalf("%s: failed: %v", test.name, err) + } + if ilen := len(in); ilen != 1 { + t.Fatalf("%s: wrong in length %d, expected 1", test.name, ilen) + } + if olen := len(out); olen != 0 { + t.Fatalf("%s: wrong in length %d, expected 0", test.name, olen) + } + }) + } +} + +func TestIfByNetwork(t *testing.T) { + tests := []struct { + name string + input sockaddr.IfAddrs + selector string + matched sockaddr.IfAddrs + excluded sockaddr.IfAddrs + fail bool + }{ + { + name: "exact match", + input: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + }, + selector: "1.2.3.4", + matched: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + }, + }, + { + name: "exact match plural", + input: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.0/24"), + }, + }, + selector: "1.2.3.0/24", + matched: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.0/24"), + }, + }, + }, + { + name: "split plural", + input: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("12.2.3.0/24"), + }, + }, + selector: "1.2.3.0/24", + excluded: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("12.2.3.0/24"), + }, + }, + matched: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + }, + }, + { + name: "excluded plural", + input: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("12.2.3.0/24"), + }, + }, + selector: "10.0.0.0/8", + excluded: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("12.2.3.0/24"), + }, + }, + }, + { + name: "invalid selector", + input: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("12.2.3.0/24"), + }, + }, + selector: "[:]", + fail: true, + }, + } + + for i, test := range tests { + if 
test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + t.Run(test.name, func(t *testing.T) { + matched, excluded, err := sockaddr.IfByNetwork(test.selector, test.input) + if err != nil && !test.fail { + t.Fatal("bad") + } else if err == nil && test.fail { + t.Fatal("bad") + } + + if len(test.matched) != len(matched) { + t.Fatal("bad") + } else if len(test.excluded) != len(excluded) { + t.Fatal("bad") + } + + for i := 0; i < len(test.excluded); i++ { + if !reflect.DeepEqual(test.excluded[i], excluded[i]) { + t.Errorf("wrong excluded: %d %v %v", i, test.excluded[i], excluded[i]) + } + } + + for i := 0; i < len(test.matched); i++ { + if !reflect.DeepEqual(test.matched[i], matched[i]) { + t.Errorf("wrong matched: %d %v %v", i, test.matched[i], matched[i]) + } + } + }) + } +} + +func TestFilterIfByType(t *testing.T) { + tests := []struct { + name string + ifAddrs sockaddr.IfAddrs + ifAddrType sockaddr.SockAddrType + matchedLen int + remainingLen int + }{ + { + name: "include all", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("2.3.4.5"), + }, + }, + ifAddrType: sockaddr.TypeIPv4, + matchedLen: 2, + remainingLen: 0, + }, + { + name: "include some", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1"), + }, + }, + ifAddrType: sockaddr.TypeIPv4, + matchedLen: 1, + remainingLen: 1, + }, + { + name: "exclude all", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.5"), + }, + }, + ifAddrType: sockaddr.TypeIPv6, + matchedLen: 0, + remainingLen: 2, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + in, out := sockaddr.FilterIfByType(test.ifAddrs, test.ifAddrType) + if len(in) != test.matchedLen { + t.Fatalf("%s: wrong length %d, expected %d", test.name, len(in), test.matchedLen) + } + + if len(out) != test.remainingLen { + t.Fatalf("%s: wrong length %d, expected %d", test.name, len(out), test.remainingLen) + } + } +} + +// TestGetIfAddrs runs through the motions of calling sockaddr.GetIfAddrs(), but +// doesn't do much in the way of testing beyond verifying that `lo0` has a +// loopback address present. +func TestGetIfAddrs(t *testing.T) { + ifAddrs, err := sockaddr.GetAllInterfaces() + if err != nil { + t.Fatalf("Unable to proceed: %v", err) + } + if len(ifAddrs) == 0 { + t.Skip() + } + + var loInt *sockaddr.IfAddr + for _, ifAddr := range ifAddrs { + val := sockaddr.IfAddrAttr(ifAddr, "name") + if val == "" { + t.Fatalf("name failed") + } else if val == "lo0" || val == "lo" || val == "Loopback Pseudo-Interface 1" { + loInt = &ifAddr + break + } + } + if loInt == nil { + t.Fatalf("No loopback interfaces found, loInt nil") + } + + if val := sockaddr.IfAddrAttr(*loInt, "flags"); !(val == "up|loopback|multicast" || val == "up|loopback") { + t.Fatalf("expected different flags from loopback: %q", val) + } + + if loInt == nil { + t.Fatalf("Expected to find an lo0 interface, didn't find any") + } + + haveIPv4, foundIPv4lo := false, false + haveIPv6, foundIPv6lo := false, false + switch loInt.SockAddr.(type) { + case sockaddr.IPv4Addr: + haveIPv4 = true + + // Make the semi-brittle assumption that if we have + // IPv4, we also have an address at 127.0.0.1 available + // to us. 
+ if loInt.SockAddr.String() == "127.0.0.1/8" { + foundIPv4lo = true + } + case sockaddr.IPv6Addr: + haveIPv6 = true + if loInt.SockAddr.String() == "::1" { + foundIPv6lo = true + } + default: + t.Fatalf("Unsupported type %v for address %v", loInt.Type(), loInt) + } + + // While not wise, it's entirely possible a host doesn't have IPv4 + // enabled. + if haveIPv4 && !foundIPv4lo { + t.Fatalf("Had an IPv4 w/o an expected IPv4 loopback addresses") + } + + // While prudent to run without, a sane environment may still contain an + // IPv6 loopback address. + if haveIPv6 && !foundIPv6lo { + t.Fatalf("Had an IPv6 w/o an expected IPv6 loopback addresses") + } +} + +// TestGetDefaultIfName tests to make sure a default interface name is always +// returned from getDefaultIfName(). +func TestGetDefaultInterface(t *testing.T) { + reportOnDefault := func(args ...interface{}) { + if havePublicIP() || havePrivateIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + + ifAddrs, err := sockaddr.GetDefaultInterfaces() + if err != nil { + switch { + case len(ifAddrs) == 0: + reportOnDefault("bad: %v", err) + case ifAddrs[0].Flags&net.FlagUp == 0: + reportOnDefault("bad: %v", err) + default: + reportOnDefault("bad: %v", err) + } + } +} + +func TestIfAddrAttrs(t *testing.T) { + const expectedNumAttrs = 2 + attrs := sockaddr.IfAddrAttrs() + if len(attrs) != expectedNumAttrs { + t.Fatalf("wrong number of attrs") + } + + tests := []struct { + name string + ifAddr sockaddr.IfAddr + attr string + expected string + }{ + { + name: "name", + ifAddr: sockaddr.IfAddr{ + Interface: net.Interface{ + Name: "abc0", + }, + }, + attr: "name", + expected: "abc0", + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + + result, err := sockaddr.IfAttrs(test.attr, sockaddr.IfAddrs{test.ifAddr}) + if err != nil { + t.Errorf("failed to get attr %q from %v", test.name, test.ifAddr) + } + + if result != test.expected { + t.Errorf("unexpected result") + } + } + + // Test an empty array + result, err := sockaddr.IfAttrs("name", sockaddr.IfAddrs{}) + if err != nil { + t.Error(`failed to get attr "name" from an empty array`) + } + + if result != "" { + t.Errorf("unexpected result") + } +} + +func TestGetAllInterfaces(t *testing.T) { + ifAddrs, err := sockaddr.GetAllInterfaces() + if err != nil { + t.Fatalf("unable to gather interfaces: %v", err) + } + + initialLen := len(ifAddrs) + if initialLen == 0 { + t.Fatalf("no interfaces available") + } + + ifAddrs, err = sockaddr.SortIfBy("name,type,port,size,address", ifAddrs) + if err != nil { + t.Fatalf("unable to initially sort address") + } + + ascSorted, err := sockaddr.SortIfBy("name,type,port,size,address", ifAddrs) + if err != nil { + t.Fatalf("unable to asc sort address") + } + + descSorted, err := sockaddr.SortIfBy("name,type,port,size,-address", ascSorted) + if err != nil { + t.Fatalf("unable to desc sort address") + } + + if initialLen != len(ascSorted) && len(ascSorted) != len(descSorted) { + t.Fatalf("wrong len") + } + + for i := initialLen - 1; i >= 0; i-- { + if !reflect.DeepEqual(descSorted[i], ifAddrs[i]) { + t.Errorf("wrong sort order: %d %v %v", i, descSorted[i], ifAddrs[i]) + } + } +} + +func TestGetDefaultInterfaces(t *testing.T) { + reportOnDefault := func(args ...interface{}) { + if havePublicIP() || havePrivateIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) 
+ } + } + + ifAddrs, err := sockaddr.GetDefaultInterfaces() + if err != nil { + reportOnDefault("unable to gather default interfaces: %v", err) + } + + if len(ifAddrs) == 0 { + reportOnDefault("no default interfaces available") + } +} + +func TestGetPrivateInterfaces(t *testing.T) { + reportOnPrivate := func(args ...interface{}) { + if havePrivateIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + + ifAddrs, err := sockaddr.GetPrivateInterfaces() + if err != nil { + reportOnPrivate("failed: %v", err) + } + + if len(ifAddrs) == 0 { + reportOnPrivate("no private IPs found") + } + + if len(ifAddrs[0].String()) == 0 { + reportOnPrivate("no string representation of private IP found") + } +} + +func TestGetPublicInterfaces(t *testing.T) { + reportOnPublic := func(args ...interface{}) { + if havePublicIP() { + t.Fatalf(args[0].(string), args[1:]...) + } else { + t.Skipf(args[0].(string), args[1:]...) + } + } + + ifAddrs, err := sockaddr.GetPublicInterfaces() + if err != nil { + reportOnPublic("failed: %v", err) + } + + if len(ifAddrs) == 0 { + reportOnPublic("no public IPs found") + } +} + +func TestIncludeExcludeIfs(t *testing.T) { + tests := []struct { + name string + ifAddrs sockaddr.IfAddrs + fail bool + excludeNum int + excludeName string + excludeParam string + includeName string + includeParam string + includeNum int + }{ + { + name: "address", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("2.3.4.5"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("3.4.5.6"), + }, + }, + excludeName: "address", + excludeNum: 2, + excludeParam: `^1\..*\.4$`, + includeName: "address", + includeNum: 1, + includeParam: `^1\.2\.3\.`, + }, + { + name: "address invalid", + fail: true, + excludeName: "address", + excludeNum: 0, + excludeParam: `*`, + includeName: "address", + includeNum: 0, + includeParam: `[`, + }, + { + name: "flag", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagUp | net.FlagLoopback, + }, + }, + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagLoopback, + }, + }, + sockaddr.IfAddr{ + Interface: net.Interface{ + Flags: net.FlagMulticast, + }, + }, + }, + excludeName: "flags", + excludeNum: 2, + excludeParam: `up|loopback`, + includeName: "flags", + includeNum: 2, + includeParam: `loopback`, + }, + { + name: "flag invalid", + fail: true, + excludeName: "foo", + excludeNum: 0, + excludeParam: `*`, + includeName: "bar", + includeNum: 0, + includeParam: `[`, + }, + { + name: "name", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + Interface: net.Interface{ + Name: "abc0", + }, + }, + sockaddr.IfAddr{ + Interface: net.Interface{ + Name: "xyz0", + }, + }, + sockaddr.IfAddr{ + Interface: net.Interface{ + Name: "docker666", + }, + }, + }, + excludeName: "name", + excludeNum: 2, + excludeParam: `^docker[\d]+$`, + includeName: "name", + includeNum: 2, + includeParam: `^([a-z]+)0$`, + }, + { + name: "name invalid", + fail: true, + excludeName: "name", + excludeNum: 0, + excludeParam: `*`, + includeName: "name", + includeNum: 0, + includeParam: `[`, + }, + { + name: "network", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.2.3.4/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.255.255.4/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("::1"), + }, + }, + excludeName: "network", + excludeNum: 1, + 
excludeParam: `10.0.0.0/8`, + includeName: "network", + includeNum: 1, + includeParam: `::/127`, + }, + { + name: "port", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:8600"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("2.3.4.5:4646"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("3.4.5.6:4647"), + }, + }, + excludeName: "port", + excludeNum: 2, + excludeParam: `0$`, + includeName: "port", + includeNum: 2, + includeParam: `^46[\d]{2}$`, + }, + { + name: "port invalid", + fail: true, + excludeName: "port", + excludeNum: 0, + excludeParam: `*`, + includeName: "port", + includeNum: 0, + includeParam: `[`, + }, + { + name: "rfc", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.2.3.4/24"), + }, + sockaddr.IfAddr{ + // Excluded (/127 vs /128) + SockAddr: sockaddr.MustIPv6Addr("::1/127"), + }, + sockaddr.IfAddr{ + // Excluded (/127 vs /128) + SockAddr: sockaddr.MustIPv6Addr("::/127"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("203.0.113.0/24"), + }, + }, + excludeName: "rfc", + excludeNum: 2, + excludeParam: `6890`, + includeName: "rfc", + includeNum: 1, + includeParam: `3330`, + }, + { + name: "rfc invalid", + fail: true, + excludeName: "rfc", + excludeNum: 0, + excludeParam: `rfcOneTwoThree`, + includeName: "rfc", + includeNum: 0, + includeParam: `99999999999999`, + }, + { + name: "rfc IPv4 exclude", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("192.169.1.1"), + }, + }, + excludeName: "rfc", + excludeNum: 1, + excludeParam: `1918`, + includeName: "rfc", + includeNum: 0, + includeParam: `1918`, + }, + { + name: "rfc IPv4 include", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("192.168.1.1"), + }, + }, + excludeName: "rfc", + excludeNum: 0, + excludeParam: `1918`, + includeName: "rfc", + includeNum: 1, + includeParam: `1918`, + }, + { + name: "rfc IPv4 excluded RFCs", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("192.168.1.1"), + }, + }, + excludeName: "rfc", + excludeNum: 1, + excludeParam: `4291`, + includeName: "rfc", + includeNum: 0, + includeParam: `4291`, + }, + { + name: "rfc IPv6 exclude", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("cc::1/127"), + }, + }, + excludeName: "rfc", + excludeNum: 1, + excludeParam: `4291`, + includeName: "rfc", + includeNum: 0, + includeParam: `4291`, + }, + { + name: "rfc IPv6 include", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/127"), + }, + }, + excludeName: "rfc", + excludeNum: 0, + excludeParam: `4291`, + includeName: "rfc", + includeNum: 1, + includeParam: `4291`, + }, + { + name: "rfc zero match", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + }, + }, + excludeName: "rfc", + excludeNum: 1, + excludeParam: `1918`, + includeName: "rfc", + includeNum: 0, + includeParam: `1918`, + }, + { + name: "rfc empty list", + ifAddrs: sockaddr.IfAddrs{}, + excludeName: "rfc", + excludeNum: 0, + excludeParam: `4291`, + includeName: "rfc", + includeNum: 0, + includeParam: `1918`, + }, + { + name: "size", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.2.3.4/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("203.0.113.0/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/24"), + }, + }, + excludeName: "size", + excludeParam: `24`, + excludeNum: 
0, + includeName: "size", + includeParam: `24`, + includeNum: 3, + }, + { + name: "size invalid", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.2.3.4/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/128"), + }, + }, + fail: true, + excludeName: "size", + excludeParam: `33`, + excludeNum: 0, + includeName: "size", + includeParam: `-1`, + includeNum: 0, + }, + { + name: "type", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.2.3.4/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("203.0.113.0/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv6Addr("::1/127"), + }, + }, + excludeName: "type", + excludeParam: `ipv6`, + excludeNum: 2, + includeName: "type", + includeParam: `ipv4`, + includeNum: 2, + }, + { + name: "type", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("10.2.3.4/24"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("::1"), + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustUnixSock("/tmp/foo"), + }, + }, + excludeName: "type", + excludeParam: `ip`, + excludeNum: 1, + includeName: "type", + includeParam: `unix`, + includeNum: 1, + }, + { + name: "type invalid arg", + fail: true, + excludeName: "type", + excludeParam: `*`, + excludeNum: 0, + includeName: "type", + includeParam: `[`, + includeNum: 0, + }, + { + name: "type invalid", + fail: true, + excludeName: "foo", + excludeParam: `bar`, + excludeNum: 0, + includeName: "baz", + includeParam: `bur`, + includeNum: 0, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + t.Run(fmt.Sprintf("%s-%s", test.name, "include"), func(t *testing.T) { + t.Logf("test.ifAddrs: %v", test.ifAddrs) + inIfAddrs, err := sockaddr.IncludeIfs(test.includeName, test.includeParam, test.ifAddrs) + t.Logf("inIfAddrs: %v", inIfAddrs) + + switch { + case !test.fail && err != nil: + t.Errorf("%s: failed unexpectedly: %v", test.name, err) + case test.fail && err == nil: + t.Errorf("%s: failed to throw an error", test.name) + case test.fail && err != nil: + // expected test failure + return + } + + if len(inIfAddrs) != test.includeNum { + t.Errorf("%s: failed include length check. Expected %d, got %d. Input: %q", test.name, test.includeNum, len(inIfAddrs), test.includeParam) + } + }) + + t.Run(fmt.Sprintf("%s-%s", test.name, "exclude"), func(t *testing.T) { + t.Logf("test.ifAddrs: %v", test.ifAddrs) + outIfAddrs, err := sockaddr.ExcludeIfs(test.excludeName, test.excludeParam, test.ifAddrs) + t.Logf("outIfAddrs: %v", outIfAddrs) + + switch { + case !test.fail && err != nil: + t.Errorf("%s: failed unexpectedly: %v", test.name, err) + case test.fail && err == nil: + t.Errorf("%s: failed to throw an error", test.name) + case test.fail && err != nil: + // expected test failure + return + } + + if len(outIfAddrs) != test.excludeNum { + t.Errorf("%s: failed exclude length check. Expected %d, got %d. 
Input: %q", test.name, test.excludeNum, len(outIfAddrs), test.excludeParam) + } + }) + } +} + +func TestNewIPAddr(t *testing.T) { + tests := []struct { + name string + input string + output string + pass bool + }{ + { + name: "ipv4", + input: "1.2.3.4", + output: "1.2.3.4", + pass: true, + }, + { + name: "ipv6", + input: "::1", + output: "::1", + pass: true, + }, + { + name: "invalid", + input: "255.255.255.256", + output: "", + pass: false, + }, + } + + for _, test := range tests { + ip, err := sockaddr.NewIPAddr(test.input) + switch { + case err == nil && test.pass, + err != nil && !test.pass: + + default: + t.Errorf("expected %s's success to be %t", test.input, test.pass) + } + + if !test.pass { + continue + } + + ipStr := ip.String() + if ipStr != test.output { + t.Errorf("Expected %q to match %q", test.input, test.output, ipStr) + } + + } +} + +func TestIPAttrs(t *testing.T) { + const expectedIPAttrs = 11 + ipAttrs := sockaddr.IPAttrs() + if len(ipAttrs) != expectedIPAttrs { + t.Fatalf("wrong number of args") + } +} + +func TestUniqueIfAddrsBy(t *testing.T) { + tests := []struct { + name string + ifAddrs sockaddr.IfAddrs + fail bool + selector string + expected []string + }{ + { + name: "address", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("203.0.113.0/24"), + Interface: net.Interface{ + Name: "abc0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("203.0.113.0/24"), + Interface: net.Interface{ + Name: "abc0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("10.2.3.4"), + Interface: net.Interface{ + Name: "foo1", + }, + }, + }, + selector: "address", + expected: []string{"203.0.113.0/24 {0 0 abc0 0}", "10.2.3.4 {0 0 foo1 0}"}, + }, + { + name: "name", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("::1"), + Interface: net.Interface{ + Name: "lo0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("fe80::1"), + Interface: net.Interface{ + Name: "lo0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("127.0.0.1"), + Interface: net.Interface{ + Name: "foo1", + }, + }, + }, + selector: "name", + expected: []string{"::1 {0 0 lo0 0}", "127.0.0.1 {0 0 foo1 0}"}, + }, + { + name: "invalid", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{}, + }, + fail: true, + selector: "goozfraba", + expected: []string{}, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + t.Run(test.name, func(t *testing.T) { + + uniqueAddrs, err := sockaddr.UniqueIfAddrsBy(test.selector, test.ifAddrs) + switch { + case !test.fail && err != nil: + t.Fatalf("%s: failed unexpectedly: %v", test.name, err) + case test.fail && err == nil: + t.Fatalf("%s: failed to throw an error", test.name) + case test.fail && err != nil: + // expected test failure + return + } + + if len(uniqueAddrs) != len(test.expected) { + t.Fatalf("%s: failed uniquify by attribute %s", test.name, test.selector) + } + + for i := 0; i < len(uniqueAddrs); i++ { + got := uniqueAddrs[i].String() + if got != test.expected[i] { + t.Fatalf("%s: expected %q got %q", test.name, test.expected[i], got) + } + } + + }) + } +} + +func TestJoinIfAddrsBy(t *testing.T) { + tests := []struct { + name string + ifAddrs sockaddr.IfAddrs + fail bool + selector string + joinStr string + expected string + }{ + { + name: "address", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("203.0.113.0/24"), + Interface: net.Interface{ + Name: "abc0", + }, + }, + sockaddr.IfAddr{ + 
SockAddr: sockaddr.MustIPAddr("203.0.113.1"), + Interface: net.Interface{ + Name: "abc0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("10.2.3.4"), + Interface: net.Interface{ + Name: "foo1", + }, + }, + }, + selector: "address", + joinStr: " ", + expected: "203.0.113.0 203.0.113.1 10.2.3.4", + }, + { + name: "name", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("::1"), + Interface: net.Interface{ + Name: "lo0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("fe80::1"), + Interface: net.Interface{ + Name: "foo0", + }, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("127.0.0.1"), + Interface: net.Interface{ + Name: "bar2", + }, + }, + }, + selector: "name", + joinStr: "-/-", + expected: "lo0-/-foo0-/-bar2", + }, + { + name: "invalid", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPAddr("127.0.0.1"), + Interface: net.Interface{ + Name: "bar2", + }, + }, + }, + fail: true, + selector: "goozfraba", + expected: "", + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + t.Run(test.name, func(t *testing.T) { + + result, err := sockaddr.JoinIfAddrs(test.selector, test.joinStr, test.ifAddrs) + switch { + case !test.fail && err != nil: + t.Fatalf("%s: failed unexpectedly: %v", test.name, err) + case test.fail && err == nil: + t.Fatalf("%s: failed to throw an error", test.name) + case test.fail && err != nil: + // expected test failure + return + } + + if result != test.expected { + t.Fatalf("%s: expected %q got %q", test.name, test.expected, result) + } + + }) + } +} + +func TestLimitOffset(t *testing.T) { + tests := []struct { + name string + ifAddrs sockaddr.IfAddrs + limit uint + offset int + fail bool + expected sockaddr.IfAddrs + }{ + { + name: "basic limit offset", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.0/24")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.2")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.3")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.4")}, + }, + limit: 2, + offset: 1, + expected: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.2")}, + }, + }, + { + name: "negative offset with limit", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.0/24")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.2")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.3")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.4")}, + }, + limit: 2, + offset: -3, + expected: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.2")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.3")}, + }, + }, + { + name: "large limit", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.0/24")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.2")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.3")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.4")}, + }, + limit: 100, + offset: 3, + expected: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.3")}, + sockaddr.IfAddr{SockAddr: 
sockaddr.MustIPAddr("203.0.113.4")}, + }, + }, + { + name: "bigger offset than size", + ifAddrs: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.0/24")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPAddr("203.0.113.1")}, + }, + fail: true, + limit: 1, + offset: 3, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + t.Run(test.name, func(t *testing.T) { + + offsetResults, err := sockaddr.OffsetIfAddrs(test.offset, test.ifAddrs) + switch { + case !test.fail && err != nil: + t.Fatalf("%s: failed unexpectedly: %v", test.name, err) + case test.fail && err == nil: + t.Fatalf("%s: failed to throw an error", test.name) + case test.fail && err != nil: + // expected test failure + return + } + + limitResults, err := sockaddr.LimitIfAddrs(test.limit, offsetResults) + switch { + case !test.fail && err != nil: + t.Fatalf("%s: failed unexpectedly: %v", test.name, err) + case test.fail && err == nil: + t.Fatalf("%s: failed to throw an error", test.name) + case test.fail && err != nil: + // expected test failure + return + } + + if len(test.expected) != len(limitResults) { + t.Fatalf("bad") + } + + for i := 0; i < len(test.expected); i++ { + if !reflect.DeepEqual(limitResults[i], test.expected[i]) { + t.Errorf("objects in ordered limit") + } + } + }) + } +} + +func TestSortIfBy(t *testing.T) { + tests := []struct { + name string + sortStr string + in sockaddr.IfAddrs + out sockaddr.IfAddrs + fail bool + }{ + { + name: "sort address", + sortStr: "address", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.3")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.3")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4")}, + }, + }, + { + name: "sort +address", + sortStr: "+address", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.3")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.3")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4")}, + }, + }, + { + name: "sort -address", + sortStr: "-address", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.3")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.3")}, + }, + }, + { + // NOTE(seanc@): This test requires macOS, or at least a computer where + // en0 has the default route. + name: "sort default", + sortStr: "default", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + Interface: net.Interface{Name: ifNameWithDefault}, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + Interface: net.Interface{Name: "other0"}, + }, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + Interface: net.Interface{Name: ifNameWithDefault}, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + Interface: net.Interface{Name: "other0"}, + }, + }, + }, + { + // NOTE(seanc@): This test requires macOS, or at least a computer where + // en0 has the default route. 
+ name: "sort +default", + sortStr: "+default", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + Interface: net.Interface{Name: "other0"}, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + Interface: net.Interface{Name: ifNameWithDefault}, + }, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + Interface: net.Interface{Name: ifNameWithDefault}, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + Interface: net.Interface{Name: "other0"}, + }, + }, + }, + { + name: "sort -default", + sortStr: "-default", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + Interface: net.Interface{Name: ifNameWithDefault}, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + Interface: net.Interface{Name: "other0"}, + }, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.4"), + Interface: net.Interface{Name: "other0"}, + }, + sockaddr.IfAddr{ + SockAddr: sockaddr.MustIPv4Addr("1.2.3.3"), + Interface: net.Interface{Name: ifNameWithDefault}, + }, + }, + }, + { + name: "sort name", + sortStr: "name", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{Interface: net.Interface{Name: "foo"}}, + sockaddr.IfAddr{Interface: net.Interface{Name: "bar"}}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{Interface: net.Interface{Name: "bar"}}, + sockaddr.IfAddr{Interface: net.Interface{Name: "foo"}}, + }, + }, + { + name: "sort +name", + sortStr: "+name", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{Interface: net.Interface{Name: "foo"}}, + sockaddr.IfAddr{Interface: net.Interface{Name: "bar"}}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{Interface: net.Interface{Name: "bar"}}, + sockaddr.IfAddr{Interface: net.Interface{Name: "foo"}}, + }, + }, + { + name: "sort -name", + sortStr: "-name", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{Interface: net.Interface{Name: "bar"}}, + sockaddr.IfAddr{Interface: net.Interface{Name: "foo"}}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{Interface: net.Interface{Name: "foo"}}, + sockaddr.IfAddr{Interface: net.Interface{Name: "bar"}}, + }, + }, + { + name: "sort port", + sortStr: "port", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("[::1]:53")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("[::1]:53")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort +port", + sortStr: "+port", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("[::1]:53")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("[::1]:53")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort -port", + sortStr: "-port", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("[::1]:53")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("[::1]:53")}, + }, + }, + { + name: "sort private", + sortStr: "private", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1")}, + }, + out: sockaddr.IfAddrs{ + 
sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort +private", + sortStr: "+private", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort -private", + sortStr: "-private", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1")}, + }, + }, + { + name: "sort size", + sortStr: "size", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort +size", + sortStr: "+size", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort -size", + sortStr: "-size", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + }, + }, + { + name: "sort type", + sortStr: "type", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("::1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("::1")}, + }, + }, + { + name: "sort +type", + sortStr: "+type", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("::1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("192.168.1.1/27")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("::1")}, + }, + }, + { + name: "sort -type", + sortStr: "-type", + in: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("::1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + out: sockaddr.IfAddrs{ + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv6Addr("::1")}, + sockaddr.IfAddr{SockAddr: sockaddr.MustIPv4Addr("1.2.3.4:80")}, + }, + }, + { + name: "sort invalid", + sortStr: "ENOENT", + fail: true, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + t.Run(test.name, func(t *testing.T) { + sorted, err := sockaddr.SortIfBy(test.sortStr, test.in) + if err != nil && !test.fail { + t.Fatalf("%s: sort failed: %v", test.name, err) + } + + if len(test.in) != len(sorted) { + t.Fatalf("wrong 
len") + } + + for i := 0; i < len(sorted); i++ { + if !reflect.DeepEqual(sorted[i], test.out[i]) { + t.Errorf("wrong sort order: %d %v %v", i, sorted[i], test.out[i]) + } + } + }) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ifattr_test.go b/vendor/github.com/hashicorp/go-sockaddr/ifattr_test.go new file mode 100644 index 0000000000..b1a01b892e --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ifattr_test.go @@ -0,0 +1,112 @@ +package sockaddr_test + +import ( + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func TestIfAttr_net(t *testing.T) { + ifAddrs, err := sockaddr.GetAllInterfaces() + if err != nil { + t.Fatalf("Unable to proceed: %v", err) + } + + for _, ifAddr := range ifAddrs { + testSockAddrAttr(t, ifAddr) + } +} + +func TestIfAttr_unix(t *testing.T) { + newUnixSock := func(path string) sockaddr.UnixSock { + sa, err := sockaddr.NewUnixSock(path) + if err != nil { + t.Fatalf("unable to create new unix socket: %v", err) + } + return sa + } + unixSockets := []sockaddr.SockAddr{ + newUnixSock("/tmp/test"), + } + + for _, sa := range unixSockets { + testSockAddrAttr(t, sa) + } +} + +func testSockAddrAttr(t *testing.T, sai interface{}) { + attrNamesPerType := []struct { + name sockaddr.AttrName + ipv4Pass bool + ipv6Pass bool + unixPass bool + }{ + // Universal + {"type", true, true, true}, + {"string", true, true, true}, + // IP + {"name", true, true, false}, + {"size", true, true, false}, + {"flags", true, true, false}, + {"host", true, true, false}, + {"address", true, true, false}, + {"port", true, true, false}, + {"netmask", true, true, false}, + {"network", true, true, false}, + {"mask_bits", true, true, false}, + {"binary", true, true, false}, + {"hex", true, true, false}, + {"first_usable", true, true, false}, + {"last_usable", true, true, false}, + {"octets", true, true, false}, + // IPv4 + {"broadcast", true, false, false}, + {"uint32", true, false, false}, + // IPv6 + {"uint128", false, true, false}, + // Unix + {"path", false, false, true}, + } + + for _, attrTest := range attrNamesPerType { + switch v := sai.(type) { + case sockaddr.IfAddr: + saType := v.Type() + _, err := v.Attr(attrTest.name) + switch saType { + case sockaddr.TypeIPv4: + if err == nil && attrTest.ipv4Pass || err != nil && !attrTest.ipv4Pass { + // pass + } + // fallthrough + case sockaddr.TypeIPv6: + if err == nil && attrTest.ipv6Pass || err != nil && !attrTest.ipv6Pass { + // pass + } + // fallthrough + case sockaddr.TypeUnix: + if err == nil && attrTest.unixPass || err != nil && !attrTest.unixPass { + // pass + } + // fallthrough + default: + t.Errorf("Unable to fetch attr name %q: %v", attrTest.name, err) + } + case sockaddr.SockAddr: + val, err := sockaddr.Attr(v, attrTest.name) + _ = err + + pass := len(val) > 0 + switch { + case v.Type() == sockaddr.TypeIPv4 && attrTest.ipv4Pass == pass, + v.Type() == sockaddr.TypeIPv6 && attrTest.ipv6Pass == pass, + v.Type() == sockaddr.TypeUnix && attrTest.unixPass == pass: + // pass + default: + t.Errorf("Unable to fetch attr name %q from %v / %v + %+q", attrTest.name, v, v.Type(), val) + } + default: + t.Fatalf("unsupported type %T %v", sai, sai) + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddr_test.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddr_test.go new file mode 100644 index 0000000000..68ce2c8b48 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddr_test.go @@ -0,0 +1,222 @@ +package sockaddr_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-sockaddr" +) + 
+func TestSockAddr_IPAddr_CmpAddress(t *testing.T) { + tests := []struct { + a string + b string + cmp int + }{ + { // 0: Same IPAddr (v4), same port + a: "208.67.222.222:0", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 1: Same IPAddr (v6), same port + a: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:0", + b: "2607:f0d0:1002:0051:0000:0000:0000:0004/128", + cmp: 0, + }, + { // 2: Same IPAddr (v4), different port + a: "208.67.222.222:4646", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 3: Same IPAddr (v6), different port + a: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:4646", + b: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:4647", + cmp: 0, + }, + { // 4: Different IPAddr (v4), same port + a: "208.67.220.220:4648", + b: "208.67.222.222:4648", + cmp: -1, + }, + { // 5: Different IPAddr (v6), same port + a: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:4648", + b: "[2607:f0d0:1002:0052:0000:0000:0000:0004]:4648", + cmp: -1, + }, + { // 6: Different IPAddr (v4), different port + a: "208.67.220.220:8600", + b: "208.67.222.222:4648", + cmp: -1, + }, + { // 7: Different IPAddr (v6), different port + a: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:8500", + b: "[2607:f0d0:1002:0052:0000:0000:0000:0004]:4648", + cmp: -1, + }, + { // 8: Incompatible IPAddr (v4 vs v6), same port + a: "208.67.220.220:8600", + b: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:8600", + cmp: 0, + }, + { // 9: Incompatible IPAddr (v4 vs v6), different port + a: "208.67.220.220:8500", + b: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:8600", + cmp: 0, + }, + { // 10: Incompatible SockAddr types + a: "128.95.120.1:123", + b: "/tmp/foo.sock", + cmp: 0, + }, + { // 11: Incompatible SockAddr types + a: "[::]:123", + b: "/tmp/foo.sock", + cmp: 0, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + saA, err := sockaddr.NewSockAddr(test.a) + if err != nil { + t.Fatalf("[%d] Unable to create a SockAddr from %+q: %v", idx, test.a, err) + } + saB, err := sockaddr.NewSockAddr(test.b) + if err != nil { + t.Fatalf("[%d] Unable to create an SockAddr from %+q: %v", idx, test.b, err) + } + + ipA, ok := saA.(sockaddr.IPAddr) + if !ok { + t.Fatalf("[%d] Unable to convert SockAddr %+q to an IPAddr", idx, test.a) + } + + if x := ipA.CmpAddress(saB); x != test.cmp { + t.Errorf("[%d] IPAddr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipA, saB, test.cmp, x) + } + + ipB, ok := saB.(sockaddr.IPAddr) + if !ok { + // Return success for comparing non-IPAddr types + return + } + if x := ipA.CmpAddress(ipB); x != test.cmp { + t.Errorf("[%d] IPAddr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipA, ipB, test.cmp, x) + } + if x := ipB.CmpAddress(ipA); x*-1 != test.cmp { + t.Errorf("[%d] IPAddr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipB, ipA, test.cmp, x) + } + + if x := ipB.CmpAddress(saA); x*-1 != test.cmp { + t.Errorf("[%d] IPAddr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipB, saA, test.cmp, x) + } + }) + } +} + +func TestSockAddr_IPAddr_CmpPort(t *testing.T) { + tests := []struct { + a string + b string + cmp int + }{ + { // 0: Same IPv4Addr, same port + a: "208.67.222.222:0", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 1: Different IPv4Addr, same port + a: "208.67.220.220:0", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 2: Same IPv4Addr, different port + a: "208.67.222.222:80", + b: "208.67.222.222:443", + cmp: -1, + }, + { // 3: Different IPv4Addr, different port + a: "208.67.220.220:8600", + 
b: "208.67.222.222:53", + cmp: 1, + }, + { // 4: Same IPv6Addr, same port + a: "[::]:0", + b: "::/128", + cmp: 0, + }, + { // 5: Different IPv6Addr, same port + a: "[::]:0", + b: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:0", + cmp: 0, + }, + { // 6: Same IPv6Addr, different port + a: "[::]:8400", + b: "[::]:8600", + cmp: -1, + }, + { // 7: Different IPv6Addr, different port + a: "[::]:8600", + b: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:53", + cmp: 1, + }, + { // 8: Mixed IPAddr types, same port + a: "[::]:53", + b: "208.67.220.220:53", + cmp: 0, + }, + { // 9: Mixed IPAddr types, different port + a: "[::]:53", + b: "128.95.120.1:123", + cmp: -1, + }, + { // 10: Incompatible SockAddr types + a: "128.95.120.1:123", + b: "/tmp/foo.sock", + cmp: 0, + }, + { // 11: Incompatible SockAddr types + a: "[::]:123", + b: "/tmp/foo.sock", + cmp: 0, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + saA, err := sockaddr.NewSockAddr(test.a) + if err != nil { + t.Fatalf("[%d] Unable to create a SockAddr from %+q: %v", idx, test.a, err) + } + saB, err := sockaddr.NewSockAddr(test.b) + if err != nil { + t.Fatalf("[%d] Unable to create an SockAddr from %+q: %v", idx, test.b, err) + } + + ipA, ok := saA.(sockaddr.IPAddr) + if !ok { + t.Fatalf("[%d] Unable to convert SockAddr %+q to an IPAddr", idx, test.a) + } + + if x := ipA.CmpPort(saB); x != test.cmp { + t.Errorf("[%d] IPAddr.CmpPort() failed with %+q with %+q (expected %d, received %d)", idx, ipA, saB, test.cmp, x) + } + + ipB, ok := saB.(sockaddr.IPAddr) + if !ok { + // Return success for comparing non-IPAddr types + return + } + if x := ipA.CmpPort(ipB); x != test.cmp { + t.Errorf("[%d] IPAddr.CmpPort() failed with %+q with %+q (expected %d, received %d)", idx, ipA, ipB, test.cmp, x) + } + if x := ipB.CmpPort(ipA); x*-1 != test.cmp { + t.Errorf("[%d] IPAddr.CmpPort() failed with %+q with %+q (expected %d, received %d)", idx, ipB, ipA, test.cmp, x) + } + + if x := ipB.CmpPort(saA); x*-1 != test.cmp { + t.Errorf("[%d] IPAddr.CmpPort() failed with %+q with %+q (expected %d, received %d)", idx, ipB, saA, test.cmp, x) + } + }) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipaddrs_test.go b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs_test.go new file mode 100644 index 0000000000..a72f77f63c --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipaddrs_test.go @@ -0,0 +1,460 @@ +package sockaddr_test + +import ( + "fmt" + "sort" + "testing" + + "github.com/hashicorp/go-sockaddr" +) + +type GoodTestIPAddrTest struct { + sockAddrs sockaddr.SockAddrs + sortedBySpecificMasklen sockaddr.SockAddrs + sortedByBroadMasklen sockaddr.SockAddrs + sortedByNetwork sockaddr.SockAddrs +} +type GoodTestIPAddrTests []*GoodTestIPAddrTest + +func makeTestIPAddrs(t *testing.T) GoodTestIPAddrTests { + goodTestInputs := []struct { + sockAddrs []string + sortedBySpecificMasklen []string + sortedByBroadMasklen []string + sortedByNetwork []string + }{ + { + sockAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "240.0.0.1/4", + }, + sortedBySpecificMasklen: []string{ + "128.95.120.1/32", + "192.168.1.10/24", + "192.168.0.0/16", + "172.16.1.3/12", + "10.0.0.0/8", + "240.0.0.1/4", + }, + sortedByBroadMasklen: []string{ + "240.0.0.1/4", + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.1.10/24", + "128.95.120.1/32", + }, + sortedByNetwork: []string{ + "10.0.0.0/8", + "128.95.120.1/32", + "172.16.1.3/12", + "192.168.0.0/16", + 
"192.168.1.10/24", + "240.0.0.1/4", + }, + }, + } + gfs := make(GoodTestIPAddrTests, 0, len(goodTestInputs)) + for idx, gfi := range goodTestInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + gf := new(GoodTestIPAddrTest) + gf.sockAddrs = make(sockaddr.SockAddrs, 0, len(gfi.sockAddrs)) + for _, n := range gfi.sockAddrs { + sa, err := sockaddr.NewSockAddr(n) + if err != nil { + t.Fatalf("Expected valid network") + } + gf.sockAddrs = append(gf.sockAddrs, sa) + } + + gf.sortedBySpecificMasklen = make(sockaddr.SockAddrs, 0, len(gfi.sortedBySpecificMasklen)) + for _, n := range gfi.sortedBySpecificMasklen { + na, err := sockaddr.NewSockAddr(n) + if err != nil { + t.Fatalf("Expected valid network") + } + gf.sortedBySpecificMasklen = append(gf.sortedBySpecificMasklen, na) + } + + if len(gf.sockAddrs) != len(gf.sortedBySpecificMasklen) { + t.Fatalf("Expected same number of sortedBySpecificMasklen networks") + } + + gf.sortedByBroadMasklen = make(sockaddr.SockAddrs, 0, len(gfi.sortedByBroadMasklen)) + for _, n := range gfi.sortedByBroadMasklen { + na, err := sockaddr.NewSockAddr(n) + if err != nil { + t.Fatalf("Expected valid network") + } + gf.sortedByBroadMasklen = append(gf.sortedByBroadMasklen, na) + } + + if len(gf.sockAddrs) != len(gf.sortedByBroadMasklen) { + t.Fatalf("Expected same number of sortedByBroadMasklen networks") + } + + gf.sortedByNetwork = make(sockaddr.SockAddrs, 0, len(gfi.sortedByNetwork)) + for _, n := range gfi.sortedByNetwork { + na, err := sockaddr.NewSockAddr(n) + if err != nil { + t.Fatalf("Expected valid network") + } + gf.sortedByNetwork = append(gf.sortedByNetwork, na) + } + + if len(gf.sockAddrs) != len(gf.sortedByNetwork) { + t.Fatalf("Expected same number of sortedByNetwork networks") + } + }) + } + + return gfs +} + +func TestSockAddr_IPAddrs_BySpecificMaskLen(t *testing.T) { + testInputs := sockAddrStringInputs{ + { + inputAddrs: []string{"10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "240.0.0.1/4", + }, + sortedAddrs: []string{ + "128.95.120.1/32", + "192.168.1.10/24", + "192.168.0.0/16", + "172.16.1.3/12", + "10.0.0.0/8", + "240.0.0.1/4", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + inputAddrs := convertToSockAddrs(t, test.inputAddrs) + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + sockaddrs := append(sockaddr.SockAddrs(nil), inputAddrs...) 
+ filteredAddrs, _ := sockaddrs.FilterByType(sockaddr.TypeIPv4) + ipv4Addrs := make([]sockaddr.IPv4Addr, 0, len(filteredAddrs)) + for _, x := range filteredAddrs { + switch v := x.(type) { + case sockaddr.IPv4Addr: + ipv4Addrs = append(ipv4Addrs, v) + default: + t.Fatalf("unexpected SockAddr type %T", v) + } + } + + ipAddrs := make([]sockaddr.IPAddr, 0, len(filteredAddrs)) + for _, x := range filteredAddrs { + ipAddr, ok := x.(sockaddr.IPAddr) + if !ok { + t.Fatalf("Unable to typecast to IPAddr") + } + ipAddrs = append(ipAddrs, ipAddr) + } + sort.Sort(sockaddr.SortIPAddrsBySpecificMaskLen{ipAddrs}) + + lastLen := 32 + for i, netaddr := range ipAddrs { + maskLen := netaddr.Maskbits() + if lastLen < maskLen { + t.Fatalf("Sort by specific mask length failed") + } + lastLen = maskLen + + if sortedAddrs[i] != netaddr { + t.Errorf("Expected %s, received %s in iteration %d", sortedAddrs[i], netaddr, i) + } + } + }) + } +} + +func TestSockAddr_IPAddrs_ByBroadMaskLen(t *testing.T) { + testInputs := sockAddrStringInputs{ + { + inputAddrs: []string{"10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "240.0.0.1/4", + }, + sortedAddrs: []string{ + "240.0.0.1/4", + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.1.10/24", + "128.95.120.1/32", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + inputAddrs := convertToSockAddrs(t, test.inputAddrs) + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + sockaddrs := append(sockaddr.SockAddrs(nil), inputAddrs...) + filteredAddrs, _ := sockaddrs.FilterByType(sockaddr.TypeIP) + ipAddrs := make([]sockaddr.IPAddr, 0, len(filteredAddrs)) + for _, x := range filteredAddrs { + ipAddr, ok := x.(sockaddr.IPAddr) + if !ok { + t.Fatalf("Unable to typecast to IPAddr") + } + ipAddrs = append(ipAddrs, ipAddr) + } + sort.Sort(sockaddr.SortIPAddrsByBroadMaskLen{ipAddrs}) + + var lastLen int + for i, netaddr := range ipAddrs { + maskLen := netaddr.Maskbits() + if lastLen > maskLen { + t.Fatalf("Sort by broad mask length failed") + } + lastLen = maskLen + + if sortedAddrs[i] != netaddr { + t.Errorf("Expected %s, received %s in iteration %d", sortedAddrs[i], netaddr, i) + } + } + }) + } +} + +func TestSockAddr_IPAddrs_IPAddrsByNetwork(t *testing.T) { + testInputs := sockAddrStringInputs{ + { + inputAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "240.0.0.1/4", + }, + sortedAddrs: []string{ + "10.0.0.0/8", + "128.95.120.1/32", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.1.10/24", + "240.0.0.1/4", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + inputAddrs := convertToSockAddrs(t, test.inputAddrs) + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + sockaddrs := append(sockaddr.SockAddrs(nil), inputAddrs...)
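+ // NOTE: unlike the two tests above, this test sorts through the + // OrderedAddrBy comparator composer rather than a sort.Interface + // wrapper struct; AscAddress presumably orders by the numeric address + // value, which for these IPv4 fixtures coincides with the expected + // ordering in sortedAddrs.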
+ ipaddrs, _ := sockaddrs.FilterByType(sockaddr.TypeIP) + sockaddr.OrderedAddrBy(sockaddr.AscAddress).Sort(ipaddrs) + + var lastIpUint sockaddr.IPv4Address + for i, sa := range ipaddrs { + ipv4 := *sockaddr.ToIPv4Addr(sa) + if lastIpUint > ipv4.Address { + t.Fatalf("Sort by network failed") + } + lastIpUint = ipv4.Address + + if !ipv4.Equal(sortedAddrs[i]) { + t.Errorf("[%d] Sort equality failed: expected %s, received %s", i, sortedAddrs[i], ipv4) + } + } + }) + } +} + +func TestSockAddr_IPAddrs_IPAddrsByNetworkSize(t *testing.T) { + testInputs := sockAddrStringInputs{ + { + inputAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "128.95.120.2:53", + "128.95.120.2/32", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "128.95.120.2:8600", + "240.0.0.1/4", + }, + sortedAddrs: []string{ + "128.95.120.1/32", + "128.95.120.2:53", + "128.95.120.2:8600", + "128.95.120.2/32", + "192.168.1.10/24", + "192.168.0.0/16", + "172.16.1.3/12", + "10.0.0.0/8", + "240.0.0.1/4", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + inputAddrs := convertToSockAddrs(t, test.inputAddrs) + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + + sockaddrs := append(sockaddr.SockAddrs(nil), inputAddrs...) + filteredAddrs, _ := sockaddrs.FilterByType(sockaddr.TypeIP) + ipAddrs := make([]sockaddr.IPAddr, 0, len(filteredAddrs)) + for _, x := range filteredAddrs { + ipAddr, ok := x.(sockaddr.IPAddr) + if !ok { + t.Fatalf("Unable to typecast to IPAddr") + } + ipAddrs = append(ipAddrs, ipAddr) + } + sort.Sort(sockaddr.SortIPAddrsByNetworkSize{ipAddrs}) + + // var prevAddr sockaddr.IPAddr + for i, ipAddr := range ipAddrs { + // if i == 0 { + // prevAddr = ipAddr + // continue + // } + + // if prevAddr.Cmp(ipAddr) > 0 { + // t.Logf("[%d] Prev:\t%v", i, prevAddr) + // t.Logf("[%d] ipAddr:\t%v", i, ipAddr) + // t.Fatalf("Sort by network failed") + // } + // prevAddr = ipAddr + + if !ipAddr.Equal(sortedAddrs[i]) { + t.Errorf("[%d] Sort equality failed: expected %s, received %s", i, sortedAddrs[i], ipAddr) + } + } + }) + } +} + +// func TestSockAddr_IPAddrs_IPAddrsByCmp(t *testing.T) { +// testInputs := testIPAddrsInputs{ +// { +// sockAddrs: []string{ +// "10.0.0.0/8", +// "172.16.1.3/12", +// "128.95.120.2:53", +// "128.95.120.2/32", +// "192.168.0.0/16", +// "128.95.120.1/32", +// "192.168.1.10/24", +// "128.95.120.2:8600", +// "240.0.0.1/4", +// }, +// sortedSockAddrs: []string{ +// "128.95.120.1/32", +// "128.95.120.2:53", +// "128.95.120.2:8600", +// "128.95.120.2/32", +// "192.168.1.10/24", +// "192.168.0.0/16", +// "172.16.1.3/12", +// "10.0.0.0/8", +// "240.0.0.1/4", +// }, +// }, +// } + +// for _, test := range makeTestsFromInput(t, testInputs) { +// sockaddrs := append(sockaddr.SockAddrs(nil), test.sockAddrs...) 
+// ipAddrs := sockaddrs.FilterByTypeIPAddr() +// sort.Sort(sockaddr.SortIPAddrsByCmp{ipAddrs}) +// t.Logf("Here: %+v", ipAddrs) + +// var prevAddr sockaddr.IPAddr +// for i, ipAddr := range ipAddrs { +// if i == 0 { +// prevAddr = ipAddr +// continue +// } + +// if prevAddr.Cmp(ipAddr) > 0 { +// t.Logf("[%d] Prev:\t%v", i, prevAddr) +// t.Logf("[%d] ipAddr:\t%v", i, ipAddr) +// t.Fatalf("Sort by network failed") +// } +// prevAddr = ipAddr + +// if !ipAddr.Equal(test.sortedSockAddrs[i]) { +// t.Errorf("[%d] Sort equality failed: expected %s, received %s", i, test.sortedSockAddrs[i], ipAddr) +// } +// } +// } +// } + +func TestSockAddr_IPAddrs_IPAddrsByCmp(t *testing.T) { + testInputs := sockAddrStringInputs{ + { + inputAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "128.95.120.2:53", + "128.95.120.2:53", + "128.95.120.2/32", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "128.95.120.2:8600", + "0:0:0:0:0:0:0:0", + "0:0:0:0:0:0:0:1", + "2607:f0d0:1002:0051:0000:0000:0000:0004", + "2607:f0d0:1002:0051:0000:0000:0000:0003", + "2607:f0d0:1002:0051:0000:0000:0000:0005", + "[2607:f0d0:1002:0051:0000:0000:0000:0004]:8600", + "240.0.0.1/4", + }, + sortedAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.1.10/24", + "240.0.0.1/4", + "128.95.120.1/32", + "128.95.120.2/32", + "128.95.120.2:53", + "128.95.120.2:53", + "128.95.120.2:8600", + "0:0:0:0:0:0:0:0", + "0:0:0:0:0:0:0:1", + "2607:f0d0:1002:0051:0000:0000:0000:0003", + "2607:f0d0:1002:0051:0000:0000:0000:0004", + "[2607:f0d0:1002:0051:0000:0000:0000:0004]:8600", + "2607:f0d0:1002:0051:0000:0000:0000:0005", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + shuffleStrings(test.inputAddrs) + + inputAddrs := convertToSockAddrs(t, test.inputAddrs) + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + + sockaddr.OrderedAddrBy(sockaddr.AscType, sockaddr.AscPrivate, sockaddr.AscAddress, sockaddr.AscPort).Sort(inputAddrs) + + for i, sockAddr := range inputAddrs { + if !sockAddr.Equal(sortedAddrs[i]) { + t.Errorf("[%d] Sort equality failed: expected %s, received %s", i, sortedAddrs[i], sockAddr) + } + } + }) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv4addr_test.go b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr_test.go new file mode 100644 index 0000000000..0b3d8b0c66 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv4addr_test.go @@ -0,0 +1,991 @@ +package sockaddr_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/go-sockaddr" +) + +func TestSockAddr_IPv4Addr(t *testing.T) { + tests := []struct { + z00_input string + z01_addrHexStr string + z02_addrBinStr string + z03_addrStr string + z04_NetIPStringOut string + z05_addrInt sockaddr.IPv4Address + z06_netInt sockaddr.IPv4Network + z07_ipMaskStr string + z08_maskbits int + z09_NetIPNetStringOut string + z10_maskInt sockaddr.IPv4Mask + z11_networkStr string + z12_octets []int + z13_firstUsable string + z14_lastUsable string + z15_broadcast string + z16_portInt sockaddr.IPPort + z17_DialPacketArgs []string + z18_DialStreamArgs []string + z19_ListenPacketArgs []string + z20_ListenStreamArgs []string + z21_IsRFC1918 bool + z22_IsRFC6598 bool + z23_IsRFC6890 bool + z99_pass bool + }{ + { // 0 + z00_input: "0.0.0.0", + z01_addrHexStr: "00000000", + z02_addrBinStr: "00000000000000000000000000000000", + z03_addrStr: "0.0.0.0", + z04_NetIPStringOut: "0.0.0.0", + z05_addrInt: 0, + z06_netInt: 0, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + 
z09_NetIPNetStringOut: "0.0.0.0/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "0.0.0.0", + z12_octets: []int{0, 0, 0, 0}, + z13_firstUsable: "0.0.0.0", + z14_lastUsable: "0.0.0.0", + z15_broadcast: "0.0.0.0", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "0.0.0.0:0"}, + z20_ListenStreamArgs: []string{"tcp4", "0.0.0.0:0"}, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 1 + z00_input: "0.0.0.0:80", + z01_addrHexStr: "00000000", + z02_addrBinStr: "00000000000000000000000000000000", + z03_addrStr: "0.0.0.0:80", + z04_NetIPStringOut: "0.0.0.0", + z05_addrInt: 0, + z06_netInt: 0, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "0.0.0.0/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "0.0.0.0", + z12_octets: []int{0, 0, 0, 0}, + z13_firstUsable: "0.0.0.0", + z14_lastUsable: "0.0.0.0", + z15_broadcast: "0.0.0.0", + z16_portInt: 80, + z17_DialPacketArgs: []string{"udp4", "0.0.0.0:80"}, + z18_DialStreamArgs: []string{"tcp4", "0.0.0.0:80"}, + z19_ListenPacketArgs: []string{"udp4", "0.0.0.0:80"}, + z20_ListenStreamArgs: []string{"tcp4", "0.0.0.0:80"}, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 2 + z00_input: "0.0.0.0/0", + z01_addrHexStr: "00000000", + z02_addrBinStr: "00000000000000000000000000000000", + z03_addrStr: "0.0.0.0/0", + z04_NetIPStringOut: "0.0.0.0", + z05_addrInt: 0, + z06_netInt: 0, + z07_ipMaskStr: "00000000", + z09_NetIPNetStringOut: "0.0.0.0/0", + z10_maskInt: 0, + z11_networkStr: "0.0.0.0/0", + z12_octets: []int{0, 0, 0, 0}, + z13_firstUsable: "0.0.0.1", + z14_lastUsable: "255.255.255.254", + z15_broadcast: "255.255.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z99_pass: true, + }, + { // 3 + z00_input: "0.0.0.1", + z01_addrHexStr: "00000001", + z02_addrBinStr: "00000000000000000000000000000001", + z03_addrStr: "0.0.0.1", + z04_NetIPStringOut: "0.0.0.1", + z05_addrInt: 1, + z06_netInt: 1, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "0.0.0.1/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "0.0.0.1", + z12_octets: []int{0, 0, 0, 1}, + z13_firstUsable: "0.0.0.1", + z14_lastUsable: "0.0.0.1", + z15_broadcast: "0.0.0.1", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "0.0.0.1:0"}, + z20_ListenStreamArgs: []string{"tcp4", "0.0.0.1:0"}, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 4 + z00_input: "0.0.0.1/1", + z01_addrHexStr: "00000001", + z02_addrBinStr: "00000000000000000000000000000001", + z03_addrStr: "0.0.0.1/1", + z04_NetIPStringOut: "0.0.0.1", + z05_addrInt: 1, + z06_netInt: 0, + z07_ipMaskStr: "80000000", + z08_maskbits: 1, + z09_NetIPNetStringOut: "0.0.0.0/1", + z10_maskInt: 2147483648, + z11_networkStr: "0.0.0.0/1", + z12_octets: []int{0, 0, 0, 1}, + z13_firstUsable: "0.0.0.1", + z14_lastUsable: "127.255.255.254", + z15_broadcast: "127.255.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z99_pass: true, + }, + { // 5 + z00_input: "1.2.3.4", + z01_addrHexStr: "01020304", + z02_addrBinStr: "00000001000000100000001100000100", + z03_addrStr: "1.2.3.4", + z04_NetIPStringOut: "1.2.3.4", + z05_addrInt: 16909060, + 
z06_netInt: 16909060, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "1.2.3.4/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "1.2.3.4", + z12_octets: []int{1, 2, 3, 4}, + z13_firstUsable: "1.2.3.4", + z14_lastUsable: "1.2.3.4", + z15_broadcast: "1.2.3.4", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "1.2.3.4:0"}, + z20_ListenStreamArgs: []string{"tcp4", "1.2.3.4:0"}, + z99_pass: true, + }, + { // 6 + z00_input: "10.0.0.0/8", + z01_addrHexStr: "0a000000", + z02_addrBinStr: "00001010000000000000000000000000", + z03_addrStr: "10.0.0.0/8", + z04_NetIPStringOut: "10.0.0.0", + z05_addrInt: 167772160, + z06_netInt: 167772160, + z07_ipMaskStr: "ff000000", + z08_maskbits: 8, + z09_NetIPNetStringOut: "10.0.0.0/8", + z10_maskInt: 4278190080, + z11_networkStr: "10.0.0.0/8", + z12_octets: []int{10, 0, 0, 0}, + z13_firstUsable: "10.0.0.1", + z14_lastUsable: "10.255.255.254", + z15_broadcast: "10.255.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z21_IsRFC1918: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 7 + z00_input: "128.0.0.0", + z01_addrHexStr: "80000000", + z02_addrBinStr: "10000000000000000000000000000000", + z03_addrStr: "128.0.0.0", + z04_NetIPStringOut: "128.0.0.0", + z05_addrInt: 2147483648, + z06_netInt: 2147483648, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "128.0.0.0/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "128.0.0.0", + z12_octets: []int{128, 0, 0, 0}, + z13_firstUsable: "128.0.0.0", + z14_lastUsable: "128.0.0.0", + z15_broadcast: "128.0.0.0", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "128.0.0.0:0"}, + z20_ListenStreamArgs: []string{"tcp4", "128.0.0.0:0"}, + z99_pass: true, + }, + { // 8 + z00_input: "128.95.120.1/32", + z01_addrHexStr: "805f7801", + z02_addrBinStr: "10000000010111110111100000000001", + z03_addrStr: "128.95.120.1", + z04_NetIPStringOut: "128.95.120.1", + z05_addrInt: 2153740289, + z06_netInt: 2153740289, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "128.95.120.1/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "128.95.120.1", + z12_octets: []int{128, 95, 120, 1}, + z13_firstUsable: "128.95.120.1", + z14_lastUsable: "128.95.120.1", + z15_broadcast: "128.95.120.1", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "128.95.120.1:0"}, + z20_ListenStreamArgs: []string{"tcp4", "128.95.120.1:0"}, + z99_pass: true, + }, + { // 9 + z00_input: "172.16.1.3/12", + z01_addrHexStr: "ac100103", + z02_addrBinStr: "10101100000100000000000100000011", + z03_addrStr: "172.16.1.3/12", + z04_NetIPStringOut: "172.16.1.3", + z05_addrInt: 2886729987, + z06_netInt: 2886729728, + z07_ipMaskStr: "fff00000", + z08_maskbits: 12, + z09_NetIPNetStringOut: "172.16.0.0/12", + z10_maskInt: 4293918720, + z11_networkStr: "172.16.0.0/12", + z12_octets: []int{172, 16, 1, 3}, + z13_firstUsable: "172.16.0.1", + z14_lastUsable: "172.31.255.254", + z15_broadcast: "172.31.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z21_IsRFC1918: true, + 
z23_IsRFC6890: true, + z99_pass: true, + }, + { // 10 + z00_input: "192.168.0.0/16", + z01_addrHexStr: "c0a80000", + z02_addrBinStr: "11000000101010000000000000000000", + z03_addrStr: "192.168.0.0/16", + z04_NetIPStringOut: "192.168.0.0", + z05_addrInt: 3232235520, + z06_netInt: 3232235520, + z07_ipMaskStr: "ffff0000", + z08_maskbits: 16, + z09_NetIPNetStringOut: "192.168.0.0/16", + z10_maskInt: 4294901760, + z11_networkStr: "192.168.0.0/16", + z12_octets: []int{192, 168, 0, 0}, + z13_firstUsable: "192.168.0.1", + z14_lastUsable: "192.168.255.254", + z15_broadcast: "192.168.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z21_IsRFC1918: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 11 + z00_input: "192.168.0.1", + z01_addrHexStr: "c0a80001", + z02_addrBinStr: "11000000101010000000000000000001", + z03_addrStr: "192.168.0.1", + z04_NetIPStringOut: "192.168.0.1", + z05_addrInt: 3232235521, + z06_netInt: 3232235521, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "192.168.0.1/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "192.168.0.1", + z12_octets: []int{192, 168, 0, 1}, + z13_firstUsable: "192.168.0.1", + z14_lastUsable: "192.168.0.1", + z15_broadcast: "192.168.0.1", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "192.168.0.1:0"}, + z20_ListenStreamArgs: []string{"tcp4", "192.168.0.1:0"}, + z21_IsRFC1918: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 12 + z00_input: "192.168.0.2/31", + z01_addrHexStr: "c0a80002", + z02_addrBinStr: "11000000101010000000000000000010", + z03_addrStr: "192.168.0.2/31", + z04_NetIPStringOut: "192.168.0.2", + z05_addrInt: 3232235522, + z06_netInt: 3232235522, + z07_ipMaskStr: "fffffffe", + z08_maskbits: 31, + z09_NetIPNetStringOut: "192.168.0.2/31", + z10_maskInt: 4294967294, + z11_networkStr: "192.168.0.2/31", + z12_octets: []int{192, 168, 0, 2}, + z13_firstUsable: "192.168.0.2", + z14_lastUsable: "192.168.0.3", + z15_broadcast: "192.168.0.3", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z21_IsRFC1918: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 13 + z00_input: "192.168.1.10/24", + z01_addrHexStr: "c0a8010a", + z02_addrBinStr: "11000000101010000000000100001010", + z03_addrStr: "192.168.1.10/24", + z04_NetIPStringOut: "192.168.1.10", + z05_addrInt: 3232235786, + z06_netInt: 3232235776, + z07_ipMaskStr: "ffffff00", + z08_maskbits: 24, + z09_NetIPNetStringOut: "192.168.1.0/24", + z10_maskInt: 4294967040, + z11_networkStr: "192.168.1.0/24", + z12_octets: []int{192, 168, 1, 10}, + z13_firstUsable: "192.168.1.1", + z14_lastUsable: "192.168.1.254", + z15_broadcast: "192.168.1.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z21_IsRFC1918: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 14 + z00_input: "192.168.10.10/16", + z01_addrHexStr: "c0a80a0a", + z02_addrBinStr: "11000000101010000000101000001010", + z03_addrStr: "192.168.10.10/16", + z04_NetIPStringOut: "192.168.10.10", + z05_addrInt: 3232238090, + z06_netInt: 3232235520, + z07_ipMaskStr: "ffff0000", + z08_maskbits: 16, + 
z09_NetIPNetStringOut: "192.168.0.0/16", + z10_maskInt: 4294901760, + z11_networkStr: "192.168.0.0/16", + z12_octets: []int{192, 168, 10, 10}, + z13_firstUsable: "192.168.0.1", + z14_lastUsable: "192.168.255.254", + z15_broadcast: "192.168.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z21_IsRFC1918: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 15 + z00_input: "240.0.0.0/4", + z01_addrHexStr: "f0000000", + z02_addrBinStr: "11110000000000000000000000000000", + z03_addrStr: "240.0.0.0/4", + z04_NetIPStringOut: "240.0.0.0", + z05_addrInt: 4026531840, + z06_netInt: 4026531840, + z07_ipMaskStr: "f0000000", + z08_maskbits: 4, + z09_NetIPNetStringOut: "240.0.0.0/4", + z10_maskInt: 4026531840, + z11_networkStr: "240.0.0.0/4", + z12_octets: []int{240, 0, 0, 0}, + z13_firstUsable: "240.0.0.1", + z14_lastUsable: "255.255.255.254", + z15_broadcast: "255.255.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 16 + z00_input: "240.0.0.1/4", + z01_addrHexStr: "f0000001", + z02_addrBinStr: "11110000000000000000000000000001", + z03_addrStr: "240.0.0.1/4", + z04_NetIPStringOut: "240.0.0.1", + z05_addrInt: 4026531841, + z06_netInt: 4026531840, + z07_ipMaskStr: "f0000000", + z08_maskbits: 4, + z09_NetIPNetStringOut: "240.0.0.0/4", + z10_maskInt: 4026531840, + z11_networkStr: "240.0.0.0/4", + z12_octets: []int{240, 0, 0, 1}, + z13_firstUsable: "240.0.0.1", + z14_lastUsable: "255.255.255.254", + z15_broadcast: "255.255.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 17 + z00_input: "255.255.255.255", + z01_addrHexStr: "ffffffff", + z02_addrBinStr: "11111111111111111111111111111111", + z03_addrStr: "255.255.255.255", + z04_NetIPStringOut: "255.255.255.255", + z05_addrInt: 4294967295, + z06_netInt: 4294967295, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "255.255.255.255/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "255.255.255.255", + z12_octets: []int{255, 255, 255, 255}, + z13_firstUsable: "255.255.255.255", + z14_lastUsable: "255.255.255.255", + z15_broadcast: "255.255.255.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "255.255.255.255:0"}, + z20_ListenStreamArgs: []string{"tcp4", "255.255.255.255:0"}, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 18 + z00_input: "www.hashicorp.com", + z99_pass: false, + }, + { // 19 + z00_input: "2001:DB8::/48", + z99_pass: false, + }, + { // 20 + z00_input: "2001:DB8::", + z99_pass: false, + }, + { // 21 + z00_input: "128.95.120.1:8600", + z01_addrHexStr: "805f7801", + z02_addrBinStr: "10000000010111110111100000000001", + z03_addrStr: "128.95.120.1:8600", + z04_NetIPStringOut: "128.95.120.1", + z05_addrInt: 2153740289, + z06_netInt: 2153740289, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "128.95.120.1/32", + z10_maskInt: sockaddr.IPv4HostMask, + z11_networkStr: "128.95.120.1", + z12_octets: []int{128, 95, 120, 1}, + z13_firstUsable: "128.95.120.1", + z14_lastUsable: 
"128.95.120.1", + z15_broadcast: "128.95.120.1", + z16_portInt: 8600, + z17_DialPacketArgs: []string{"udp4", "128.95.120.1:8600"}, + z18_DialStreamArgs: []string{"tcp4", "128.95.120.1:8600"}, + z19_ListenPacketArgs: []string{"udp4", "128.95.120.1:8600"}, + z20_ListenStreamArgs: []string{"tcp4", "128.95.120.1:8600"}, + z99_pass: true, + }, + { // 22 + z00_input: "100.64.2.3/23", + z01_addrHexStr: "64400203", + z02_addrBinStr: "01100100010000000000001000000011", + z03_addrStr: "100.64.2.3/23", + z04_NetIPStringOut: "100.64.2.3", + z05_addrInt: 1681916419, + z06_netInt: 1681916416, + z07_ipMaskStr: "fffffe00", + z08_maskbits: 23, + z09_NetIPNetStringOut: "100.64.2.0/23", + z10_maskInt: 4294966784, + z11_networkStr: "100.64.2.0/23", + z12_octets: []int{100, 64, 2, 3}, + z13_firstUsable: "100.64.2.1", + z14_lastUsable: "100.64.3.254", + z15_broadcast: "100.64.3.255", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", ""}, + z20_ListenStreamArgs: []string{"tcp4", ""}, + z22_IsRFC6598: true, + z23_IsRFC6890: true, + z99_pass: true, + }, + { // 23 + z00_input: "192.168.3.53/00ffffff", + z01_addrHexStr: "c0a80335", + z02_addrBinStr: "11000000101010000000001100110101", + z03_addrStr: "192.168.3.53", + z04_NetIPStringOut: "192.168.3.53", + z05_addrInt: 3232236341, + z06_netInt: 3232236341, + z07_ipMaskStr: "ffffffff", + z08_maskbits: 32, + z09_NetIPNetStringOut: "192.168.3.53/32", + z10_maskInt: 4294967295, + z11_networkStr: "192.168.3.53", + z12_octets: []int{192, 168, 3, 53}, + z13_firstUsable: "192.168.3.53", + z14_lastUsable: "192.168.3.53", + z15_broadcast: "192.168.3.53", + z17_DialPacketArgs: []string{"udp4", ""}, + z18_DialStreamArgs: []string{"tcp4", ""}, + z19_ListenPacketArgs: []string{"udp4", "192.168.3.53:0"}, + z20_ListenStreamArgs: []string{"tcp4", "192.168.3.53:0"}, + z21_IsRFC1918: true, + z22_IsRFC6598: false, + z23_IsRFC6890: true, + z99_pass: true, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv4, err := sockaddr.NewIPv4Addr(test.z00_input) + if test.z99_pass && err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.z00_input, err) + } else if !test.z99_pass && err == nil { + t.Fatalf("[%d] Expected test to fail for %+q", idx, test.z00_input) + } else if !test.z99_pass && err != nil { + // Expected failure, return successfully + return + } + + if type_ := ipv4.Type(); type_ != sockaddr.TypeIPv4 { + t.Errorf("[%d] Expected new IPv4Addr to be Type %d, received %d (int)", idx, sockaddr.TypeIPv4, type_) + } + + h, ok := ipv4.Host().(sockaddr.IPv4Addr) + if !ok { + t.Errorf("[%d] Unable to type assert +%q's Host to IPv4Addr", idx, test.z00_input) + } + + if h.Address != ipv4.Address || h.Mask != sockaddr.IPv4HostMask || h.Port != ipv4.Port { + t.Errorf("[%d] Expected %+q's Host() to return identical IPv4Addr except mask, received %+q", idx, test.z00_input, h.String()) + } + + if c := cap(*ipv4.NetIP()); c != sockaddr.IPv4len { + t.Errorf("[%d] Expected new IPv4Addr's Address capacity to be %d bytes, received %d", idx, sockaddr.IPv4len, c) + } + + if l := len(*ipv4.NetIP()); l != sockaddr.IPv4len { + t.Errorf("[%d] Expected new IPv4Addr's Address length to be %d bytes, received %d", idx, sockaddr.IPv4len, l) + } + + if s := ipv4.AddressHexString(); s != test.z01_addrHexStr { + t.Errorf("[%d] Expected address %+q's hexadecimal representation to be %+q, received %+q", idx, test.z00_input, test.z01_addrHexStr, s) + } + + if s := 
ipv4.AddressBinString(); s != test.z02_addrBinStr { + t.Errorf("[%d] Expected address %+q's binary representation to be %+q, received %+q", idx, test.z00_input, test.z02_addrBinStr, s) + } + + if s := ipv4.String(); s != test.z03_addrStr { + t.Errorf("[%d] Expected %+q's String to be %+q, received %+q", idx, test.z00_input, test.z03_addrStr, s) + } + + if s := ipv4.NetIP().String(); s != test.z04_NetIPStringOut { + t.Errorf("[%d] Expected %+q's address to be %+q, received %+q", idx, test.z00_input, test.z04_NetIPStringOut, s) + } + + if a := ipv4.Address; a != test.z05_addrInt { + t.Errorf("[%d] Expected %+q's Address to return %d, received %d", idx, test.z00_input, test.z05_addrInt, a) + } + + if n, ok := ipv4.Network().(sockaddr.IPv4Addr); !ok || n.Address != sockaddr.IPv4Address(test.z06_netInt) { + t.Errorf("[%d] Expected %+q's Network to return %d, received %d", idx, test.z00_input, test.z06_netInt, n.Address) + } + + if m := ipv4.NetIPMask().String(); m != test.z07_ipMaskStr { + t.Errorf("[%d] Expected %+q's mask to be %+q, received %+q", idx, test.z00_input, test.z07_ipMaskStr, m) + } + + if m := ipv4.Maskbits(); m != test.z08_maskbits { + t.Errorf("[%d] Expected %+q's mask bits to be %d, received %d", idx, test.z00_input, test.z08_maskbits, m) + } + + if n := ipv4.NetIPNet().String(); n != test.z09_NetIPNetStringOut { + t.Errorf("[%d] Expected %+q's network to be %+q, received %+q", idx, test.z00_input, test.z09_NetIPNetStringOut, n) + } + + if m := ipv4.Mask; m != test.z10_maskInt { + t.Errorf("[%d] Expected %+q's Mask to return %d, received %d", idx, test.z00_input, test.z10_maskInt, m) + } + + // Network()'s mask must match the IPv4Addr's Mask + if n, ok := ipv4.Network().(sockaddr.IPv4Addr); !ok || n.Mask != test.z10_maskInt { + t.Errorf("[%d] Expected %+q's Network's Mask to return %d, received %d", idx, test.z00_input, test.z10_maskInt, n.Mask) + } + + if n := ipv4.Network().String(); n != test.z11_networkStr { + t.Errorf("[%d] Expected %+q's Network() to be %+q, received %+q", idx, test.z00_input, test.z11_networkStr, n) + } + + if o := ipv4.Octets(); len(o) != 4 || o[0] != test.z12_octets[0] || o[1] != test.z12_octets[1] || o[2] != test.z12_octets[2] || o[3] != test.z12_octets[3] { + t.Errorf("[%d] Expected %+q's Octets to be %+v, received %+v", idx, test.z00_input, test.z12_octets, o) + } + + if f := ipv4.FirstUsable().String(); f != test.z13_firstUsable { + t.Errorf("[%d] Expected %+q's FirstUsable() to be %+q, received %+q", idx, test.z00_input, test.z13_firstUsable, f) + } + + if l := ipv4.LastUsable().String(); l != test.z14_lastUsable { + t.Errorf("[%d] Expected %+q's LastUsable() to be %+q, received %+q", idx, test.z00_input, test.z14_lastUsable, l) + } + + if b := ipv4.Broadcast().String(); b != test.z15_broadcast { + t.Errorf("[%d] Expected %+q's broadcast to be %+q, received %+q", idx, test.z00_input, test.z15_broadcast, b) + } + + if p := ipv4.IPPort(); p != test.z16_portInt { + t.Errorf("[%d] Expected %+q's port to be %d, received %d", idx, test.z00_input, test.z16_portInt, p) + } + + if dialNet, dialArgs := ipv4.DialPacketArgs(); dialNet != test.z17_DialPacketArgs[0] || dialArgs != test.z17_DialPacketArgs[1] { + t.Errorf("[%d] Expected %+q's DialPacketArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z17_DialPacketArgs, dialNet, dialArgs) + } + + if dialNet, dialArgs := ipv4.DialStreamArgs(); dialNet != test.z18_DialStreamArgs[0] || dialArgs != test.z18_DialStreamArgs[1] { + t.Errorf("[%d]
Expected %+q's DialStreamArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z18_DialStreamArgs, dialNet, dialArgs) + } + + if listenNet, listenArgs := ipv4.ListenPacketArgs(); listenNet != test.z19_ListenPacketArgs[0] || listenArgs != test.z19_ListenPacketArgs[1] { + t.Errorf("[%d] Expected %+q's ListenPacketArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z19_ListenPacketArgs, listenNet, listenArgs) + } + + if listenNet, listenArgs := ipv4.ListenStreamArgs(); listenNet != test.z20_ListenStreamArgs[0] || listenArgs != test.z20_ListenStreamArgs[1] { + t.Errorf("[%d] Expected %+q's ListenStreamArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z20_ListenStreamArgs, listenNet, listenArgs) + } + + if v := sockaddr.IsRFC(1918, ipv4); v != test.z21_IsRFC1918 { + t.Errorf("[%d] Expected IsRFC(1918, %+q) to be %t, received %t", idx, test.z00_input, test.z21_IsRFC1918, v) + } + + if v := sockaddr.IsRFC(6598, ipv4); v != test.z22_IsRFC6598 { + t.Errorf("[%d] Expected IsRFC(6598, %+q) to be %t, received %t", idx, test.z00_input, test.z22_IsRFC6598, v) + } + + if v := sockaddr.IsRFC(6890, ipv4); v != test.z23_IsRFC6890 { + t.Errorf("[%d] Expected IsRFC(6890, %+q) to be %t, received %t", idx, test.z00_input, test.z23_IsRFC6890, v) + } + }) + } +} + +func TestSockAddr_IPv4Addr_CmpAddress(t *testing.T) { + tests := []struct { + a string + b string + cmp int + }{ + { // 0 + a: "208.67.222.222/32", + b: "208.67.222.222", + cmp: 0, + }, + { // 1 + a: "208.67.222.222/32", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 2 + a: "208.67.222.222/32", + b: "208.67.222.222:0", + cmp: 0, + }, + { // 3 + a: "208.67.222.220/32", + b: "208.67.222.222/32", + cmp: -1, + }, + { // 4 + a: "208.67.222.222/32", + b: "208.67.222.220/32", + cmp: 1, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv4a, err := sockaddr.NewIPv4Addr(test.a) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.a, err) + } + + ipv4b, err := sockaddr.NewIPv4Addr(test.b) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.b, err) + } + + if x := ipv4a.CmpAddress(ipv4b); x != test.cmp { + t.Errorf("[%d] IPv4Addr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipv4a, ipv4b, test.cmp, x) + } + + if x := ipv4b.CmpAddress(ipv4a); x*-1 != test.cmp { + t.Errorf("[%d] IPv4Addr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipv4a, ipv4b, test.cmp, x) + } + }) + } +} + +func TestSockAddr_IPv4Addr_ContainsAddress(t *testing.T) { + tests := []struct { + input string + pass []string + fail []string + }{ + { // 0 + input: "208.67.222.222/32", + pass: []string{ + "208.67.222.222", + "208.67.222.222/32", + "208.67.222.223/31", + "208.67.222.222/31", + "0.0.0.0/0", + }, + fail: []string{ + "0.0.0.0/1", + "208.67.222.220/31", + "208.67.220.224/31", + "208.67.220.220/32", + }, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv4, err := sockaddr.NewIPv4Addr(test.input) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.input, err) + } + + for passIdx, passInput := range test.pass { + passAddr, err := sockaddr.NewIPv4Addr(passInput) + if err != nil { + t.Fatalf("[%d/%d] Unable to create an IPv4Addr from %+q: %v", idx, passIdx, passInput, err) + } + + if !passAddr.ContainsAddress(ipv4.Address) { + t.Errorf("[%d/%d] Expected %+q to contain %+q", idx, passIdx,
passInput, test.input) + } + } + + for failIdx, failInput := range test.fail { + failAddr, err := sockaddr.NewIPv4Addr(failInput) + if err != nil { + t.Fatalf("[%d/%d] Unable to create an IPv4Addr from %+q: %v", idx, failIdx, failInput, err) + } + + if failAddr.ContainsAddress(ipv4.Address) { + t.Errorf("[%d/%d] Expected %+q to not contain %+q", idx, failIdx, failInput, test.input) + } + } + }) + } +} + +func TestSockAddr_IPv4Addr_CmpPort(t *testing.T) { + tests := []struct { + a string + b string + cmp int + }{ + { // 0: Same port, same IP + a: "208.67.222.222:0", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 1: Same port, different IP + a: "208.67.222.220:0", + b: "208.67.222.222/32", + cmp: 0, + }, + { // 2: Same IP, different port + a: "208.67.222.222:80", + b: "208.67.222.222:443", + cmp: -1, + }, + { // 3: Same IP, different port + a: "208.67.222.222:443", + b: "208.67.222.222:80", + cmp: 1, + }, + { // 4: Different IP, different port + a: "208.67.222.222:53", + b: "208.67.220.220:8600", + cmp: -1, + }, + { // 5: Different IP, different port + a: "208.67.222.222:8600", + b: "208.67.220.220:53", + cmp: 1, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv4a, err := sockaddr.NewIPv4Addr(test.a) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.a, err) + } + + ipv4b, err := sockaddr.NewIPv4Addr(test.b) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.b, err) + } + + if x := ipv4a.CmpPort(ipv4b); x != test.cmp { + t.Errorf("[%d] IPv4Addr.CmpPort() failed with %+q with %+q (expected %d, received %d)", idx, ipv4a, ipv4b, test.cmp, x) + } + + if x := ipv4b.CmpPort(ipv4a); x*-1 != test.cmp { + t.Errorf("[%d] IPv4Addr.CmpPort() failed with %+q with %+q (expected %d, received %d)", idx, ipv4a, ipv4b, test.cmp, x) + } + }) + } +} + +func TestSockAddr_IPv4Addr_Equal(t *testing.T) { + tests := []struct { + name string + input string + pass []string + fail []string + }{ + { + name: "passing", + input: "208.67.222.222/32", + pass: []string{"208.67.222.222", "208.67.222.222/32", "208.67.222.222:0"}, + fail: []string{"208.67.222.222/31", "208.67.220.220", "208.67.220.220/32", "208.67.222.222:5432"}, + }, + { + name: "failing", + input: "4.2.2.1", + pass: []string{"4.2.2.1", "4.2.2.1/32"}, + fail: []string{"4.2.2.1/0", "4.2.2.2", "4.2.2.2/32", "::1"}, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv4, err := sockaddr.NewIPv4Addr(test.input) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, test.input, err) + } + + for goodIdx, passInput := range test.pass { + good, err := sockaddr.NewIPv4Addr(passInput) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv4Addr from %+q: %v", idx, passInput, err) + } + + if !ipv4.Equal(good) { + t.Errorf("[%d/%d] Expected %+q to be equal to %+q: %+q/%+q", idx, goodIdx, test.input, passInput, ipv4.String(), good.String()) + } + } + + for failIdx, failInput := range test.fail { + fail, err := sockaddr.NewIPAddr(failInput) + if err != nil { + t.Fatalf("[%d] Unable to create an IPAddr from %+q: %v", idx, failInput, err) + } + + if ipv4.Equal(fail) { + t.Errorf("[%d/%d] Expected %+q to be not equal to %+q", idx, failIdx, test.input, failInput) + } + } + }) + } +} + +func TestIPv4CmpRFC(t *testing.T) { + tests := []struct { + name string + ipv4 sockaddr.IPv4Addr + rfc uint + sa sockaddr.SockAddr + ret int + }{ + { + name: "ipv4 rfc cmp recv match not
arg", + ipv4: sockaddr.MustIPv4Addr("192.168.1.10"), + rfc: 1918, + sa: sockaddr.MustIPv6Addr("::1"), + ret: -1, + }, + { + name: "ipv4 rfc cmp recv match", + ipv4: sockaddr.MustIPv4Addr("192.168.1.2"), + rfc: 1918, + sa: sockaddr.MustIPv4Addr("203.1.2.3"), + ret: -1, + }, + { + name: "ipv4 rfc cmp defer", + ipv4: sockaddr.MustIPv4Addr("192.168.1.3"), + rfc: 1918, + sa: sockaddr.MustIPv4Addr("192.168.1.4"), + ret: 0, + }, + { + name: "ipv4 rfc cmp recv not match", + ipv4: sockaddr.MustIPv4Addr("1.2.3.4"), + rfc: 1918, + sa: sockaddr.MustIPv4Addr("203.1.2.3"), + ret: 0, + }, + { + name: "ipv4 rfc cmp recv not match arg", + ipv4: sockaddr.MustIPv4Addr("1.2.3.4"), + rfc: 1918, + sa: sockaddr.MustIPv6Addr("::1"), + ret: 0, + }, + { + name: "ipv4 rfc cmp arg match", + ipv4: sockaddr.MustIPv4Addr("1.2.3.4"), + rfc: 1918, + sa: sockaddr.MustIPv4Addr("192.168.1.5"), + ret: 1, + }, + } + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + + t.Run(test.name, func(t *testing.T) { + ipv4 := test.ipv4 + if ret := ipv4.CmpRFC(test.rfc, test.sa); ret != test.ret { + t.Errorf("%s: unexpected ret: wanted %d got %d", test.name, test.ret, ret) + } + }) + } +} + +func TestIPv4Attrs(t *testing.T) { + const expectedNumAttrs = 3 + attrs := sockaddr.IPv4Attrs() + if len(attrs) != expectedNumAttrs { + t.Fatalf("wrong number of IPv4Attrs: %d vs %d", len(attrs), expectedNumAttrs) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/ipv6addr_test.go b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr_test.go new file mode 100644 index 0000000000..62b1dfb28f --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/ipv6addr_test.go @@ -0,0 +1,725 @@ +package sockaddr_test + +import ( + "fmt" + "math/big" + "strings" + "testing" + + "github.com/hashicorp/go-sockaddr" +) + +// ipv6HostMask is an unexported big.Int representing a /128 IPv6 address +var ipv6HostMask sockaddr.IPv6Mask + +func init() { + biMask := big.NewInt(0) + biMask = biMask.SetBytes([]byte{ + 0xff, 0xff, + 0xff, 0xff, + 0xff, 0xff, + 0xff, 0xff, + 0xff, 0xff, + 0xff, 0xff, + 0xff, 0xff, + 0xff, 0xff, + }, + ) + ipv6HostMask = sockaddr.IPv6Mask(biMask) +} + +func newIPv6BigInt(t *testing.T, ipv6Str string) *big.Int { + addr := big.NewInt(0) + addrStr := strings.Join(strings.Split(ipv6Str, ":"), "") + _, ok := addr.SetString(addrStr, 16) + if !ok { + t.Fatal("Unable to create an IPv6Addr from string %+q", ipv6Str) + } + + return addr +} + +func newIPv6Address(t *testing.T, ipv6Str string) sockaddr.IPv6Address { + return sockaddr.IPv6Address(newIPv6BigInt(t, ipv6Str)) +} + +func newIPv6Mask(t *testing.T, ipv6Str string) sockaddr.IPv6Mask { + return sockaddr.IPv6Mask(newIPv6BigInt(t, ipv6Str)) +} + +func newIPv6Network(t *testing.T, ipv6Str string) sockaddr.IPv6Network { + return sockaddr.IPv6Network(newIPv6BigInt(t, ipv6Str)) +} + +func TestSockAddr_IPv6Addr(t *testing.T) { + tests := []struct { + z00_input string + z01_addrHexStr string + z02_addrBinStr string + z03_addrStr string + z04_NetIPStringOut string + z05_addrInt sockaddr.IPv6Address + z06_netInt sockaddr.IPv6Network + z07_ipMaskStr string + z08_maskbits int + z09_NetIPNetStringOut string + z10_maskInt sockaddr.IPv6Mask + z11_networkStr string + z12_octets []int + z13_firstUsable string + z14_lastUsable string + z16_portInt sockaddr.IPPort + z17_DialPacketArgs []string + z18_DialStreamArgs []string + z19_ListenPacketArgs []string + z20_ListenStreamArgs []string + z99_pass bool + }{ + { // 0 -- IPv4 fail + z00_input: "1.2.3.4", + z99_pass: 
false, + }, + { // 1 - IPv4 with port + z00_input: "5.6.7.8:80", + z99_pass: false, + }, + { // 2 - Hostname + z00_input: "www.hashicorp.com", + z99_pass: false, + }, + { // 3 - IPv6 with port, but no square brackets + z00_input: "2607:f0d0:1002:0051:0000:0000:0000:0004:8600", + z99_pass: false, + }, + { // 4 - IPv6 with port + z00_input: "[2607:f0d0:1002:0051:0000:0000:0000:0004]:8600", + z01_addrHexStr: "2607f0d0100200510000000000000004", + z02_addrBinStr: "00100110000001111111000011010000000100000000001000000000010100010000000000000000000000000000000000000000000000000000000000000100", + z03_addrStr: "[2607:f0d0:1002:51::4]:8600", + z04_NetIPStringOut: "2607:f0d0:1002:51::4", + z05_addrInt: newIPv6Address(t, "2607:f0d0:1002:0051:0000:0000:0000:0004"), + z06_netInt: newIPv6Network(t, "2607:f0d0:1002:0051:0000:0000:0000:0004"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "2607:f0d0:1002:51::4/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "2607:f0d0:1002:51::4", + z12_octets: []int{0x26, 0x7, 0xf0, 0xd0, 0x10, 0x2, 0x0, 0x51, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + z13_firstUsable: "2607:f0d0:1002:51::4", + z14_lastUsable: "2607:f0d0:1002:51::4", + z16_portInt: 8600, + z17_DialPacketArgs: []string{"udp6", "[2607:f0d0:1002:51::4]:8600"}, + z18_DialStreamArgs: []string{"tcp6", "[2607:f0d0:1002:51::4]:8600"}, + z19_ListenPacketArgs: []string{"udp6", "[2607:f0d0:1002:51::4]:8600"}, + z20_ListenStreamArgs: []string{"tcp6", "[2607:f0d0:1002:51::4]:8600"}, + z99_pass: true, + }, + { // 5 - IPv6 + z00_input: "2607:f0d0:1002:0051:0000:0000:0000:0004", + z01_addrHexStr: "2607f0d0100200510000000000000004", + z02_addrBinStr: "00100110000001111111000011010000000100000000001000000000010100010000000000000000000000000000000000000000000000000000000000000100", + z03_addrStr: "2607:f0d0:1002:51::4", + z04_NetIPStringOut: "2607:f0d0:1002:51::4", + z05_addrInt: newIPv6Address(t, "2607:f0d0:1002:0051:0000:0000:0000:0004"), + z06_netInt: newIPv6Network(t, "2607:f0d0:1002:0051:0000:0000:0000:0004"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "2607:f0d0:1002:51::4/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "2607:f0d0:1002:51::4", + z12_octets: []int{0x26, 0x7, 0xf0, 0xd0, 0x10, 0x2, 0x0, 0x51, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + z13_firstUsable: "2607:f0d0:1002:51::4", + z14_lastUsable: "2607:f0d0:1002:51::4", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[2607:f0d0:1002:51::4]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[2607:f0d0:1002:51::4]:0"}, + z99_pass: true, + }, + { // 6 IPv6 with square brackets, optional + z00_input: "[2607:f0d0:1002:0051:0000:0000:0000:0004]", + z01_addrHexStr: "2607f0d0100200510000000000000004", + z02_addrBinStr: "00100110000001111111000011010000000100000000001000000000010100010000000000000000000000000000000000000000000000000000000000000100", + z03_addrStr: "2607:f0d0:1002:51::4", + z04_NetIPStringOut: "2607:f0d0:1002:51::4", + z05_addrInt: newIPv6Address(t, "2607:f0d0:1002:0051:0000:0000:0000:0004"), + z06_netInt: newIPv6Network(t, "2607:f0d0:1002:0051:0000:0000:0000:0004"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "2607:f0d0:1002:51::4/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: 
"2607:f0d0:1002:51::4", + z12_octets: []int{0x26, 0x7, 0xf0, 0xd0, 0x10, 0x2, 0x0, 0x51, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4}, + z13_firstUsable: "2607:f0d0:1002:51::4", + z14_lastUsable: "2607:f0d0:1002:51::4", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[2607:f0d0:1002:51::4]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[2607:f0d0:1002:51::4]:0"}, + z99_pass: true, + }, + { // 7 - unspecified address + z00_input: "0:0:0:0:0:0:0:0", + z01_addrHexStr: "00000000000000000000000000000000", + z02_addrBinStr: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + z03_addrStr: "::", + z04_NetIPStringOut: "::", + z05_addrInt: newIPv6Address(t, "0"), + z06_netInt: newIPv6Network(t, "0"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "::/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "::", + z12_octets: []int{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + z13_firstUsable: "::", + z14_lastUsable: "::", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[::]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[::]:0"}, + z99_pass: true, + }, + { // 8 - loopback address + z00_input: "0:0:0:0:0:0:0:1", + z01_addrHexStr: "00000000000000000000000000000001", + z02_addrBinStr: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + z03_addrStr: "::1", + z04_NetIPStringOut: "::1", + z05_addrInt: newIPv6Address(t, "0000:0000:0000:0000:0000:0000:0000:0001"), + z06_netInt: newIPv6Network(t, "0000:0000:0000:0000:0000:0000:0000:0001"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "::1/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "::1", + z12_octets: []int{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x01}, + z13_firstUsable: "::1", + z14_lastUsable: "::1", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[::1]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[::1]:0"}, + z99_pass: true, + }, + { // 9 - IPv6 with CIDR (RFC 3849) + z00_input: "2001:DB8::/32", + z01_addrHexStr: "20010db8000000000000000000000000", + z02_addrBinStr: "00100000000000010000110110111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + z03_addrStr: "2001:db8::/32", + z04_NetIPStringOut: "2001:db8::", + z05_addrInt: newIPv6Address(t, "20010db8000000000000000000000000"), + z06_netInt: newIPv6Network(t, "20010db8000000000000000000000000"), + z07_ipMaskStr: "ffffffff000000000000000000000000", + z08_maskbits: 32, + z09_NetIPNetStringOut: "2001:db8::/32", + z10_maskInt: newIPv6Mask(t, "ffffffff000000000000000000000000"), + z11_networkStr: "2001:db8::/32", + z12_octets: []int{0x20, 0x01, 0x0d, 0xb8, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + z13_firstUsable: "2001:db8::", + z14_lastUsable: "2001:db8:ffff:ffff:ffff:ffff:ffff:ffff", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", ""}, + z20_ListenStreamArgs: []string{"tcp6", ""}, + z99_pass: true, 
+ }, + { // 10 - IPv6 ::1 + z00_input: "::1", + z01_addrHexStr: "00000000000000000000000000000001", + z02_addrBinStr: "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001", + z03_addrStr: "::1", + z04_NetIPStringOut: "::1", + z05_addrInt: newIPv6Address(t, "00000000000000000000000000000001"), + z06_netInt: newIPv6Network(t, "00000000000000000000000000000001"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "::1/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "::1", + z12_octets: []int{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, + z13_firstUsable: "::1", + z14_lastUsable: "::1", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[::1]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[::1]:0"}, + z99_pass: true, + }, + { // 11 - IPv6 100:: + z00_input: "100::", + z01_addrHexStr: "01000000000000000000000000000000", + z02_addrBinStr: "00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + z03_addrStr: "100::", + z04_NetIPStringOut: "100::", + z05_addrInt: newIPv6Address(t, "01000000000000000000000000000000"), + z06_netInt: newIPv6Network(t, "01000000000000000000000000000000"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "100::/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "100::", + z12_octets: []int{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + z13_firstUsable: "100::", + z14_lastUsable: "100::", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[100::]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[100::]:0"}, + z99_pass: true, + }, + { // 12 - IPv6 100::2 + z00_input: "100::2", + z01_addrHexStr: "01000000000000000000000000000002", + z02_addrBinStr: "00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010", + z03_addrStr: "100::2", + z04_NetIPStringOut: "100::2", + z05_addrInt: newIPv6Address(t, "01000000000000000000000000000002"), + z06_netInt: newIPv6Network(t, "01000000000000000000000000000002"), + z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "100::2/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "100::2", + z12_octets: []int{0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x02}, + z13_firstUsable: "100::2", + z14_lastUsable: "100::2", + z17_DialPacketArgs: []string{"udp6", ""}, + z18_DialStreamArgs: []string{"tcp6", ""}, + z19_ListenPacketArgs: []string{"udp6", "[100::2]:0"}, + z20_ListenStreamArgs: []string{"tcp6", "[100::2]:0"}, + z99_pass: true, + }, + { // 13 - IPv6 `[100::2]:80` + z00_input: "[100::2]:80", + z01_addrHexStr: "01000000000000000000000000000002", + z02_addrBinStr: "00000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010", + z03_addrStr: "[100::2]:80", + z04_NetIPStringOut: "100::2", + z05_addrInt: newIPv6Address(t, "01000000000000000000000000000002"), + z06_netInt: newIPv6Network(t, "01000000000000000000000000000002"), + 
z07_ipMaskStr: "ffffffffffffffffffffffffffffffff", + z08_maskbits: 128, + z09_NetIPNetStringOut: "100::2/128", + z10_maskInt: newIPv6Mask(t, "ffffffffffffffffffffffffffffffff"), + z11_networkStr: "100::2", + z12_octets: []int{0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x02}, + z13_firstUsable: "100::2", + z14_lastUsable: "100::2", + z16_portInt: 80, + z17_DialPacketArgs: []string{"udp6", "[100::2]:80"}, + z18_DialStreamArgs: []string{"tcp6", "[100::2]:80"}, + z19_ListenPacketArgs: []string{"udp6", "[100::2]:80"}, + z20_ListenStreamArgs: []string{"tcp6", "[100::2]:80"}, + z99_pass: true, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv6, err := sockaddr.NewIPv6Addr(test.z00_input) + if test.z99_pass && err != nil { + t.Fatalf("[%d] Unable to create an IPv6Addr from %+q: %v", idx, test.z00_input, err) + } else if !test.z99_pass && err == nil { + t.Fatalf("[%d] Expected test to fail for %+q", idx, test.z00_input) + } else if !test.z99_pass && err != nil { + // Expected failure, return success + return + } + + if type_ := ipv6.Type(); type_ != sockaddr.TypeIPv6 { + t.Errorf("[%d] Expected new IPv6Addr to be Type %d, received %d (int)", idx, sockaddr.TypeIPv6, type_) + } + + h, ok := ipv6.Host().(sockaddr.IPv6Addr) + if !ok { + t.Errorf("[%d] Unable to type assert +%q's Host to IPv6Addr", idx, test.z00_input) + } + + hAddressBigInt := big.Int(*h.Address) + hMaskBigInt := big.Int(*h.Mask) + if hAddressBigInt.Cmp(ipv6.Address) != 0 || hMaskBigInt.Cmp(ipv6HostMask) != 0 || h.Port != ipv6.Port { + t.Errorf("[%d] Expected %+q's Host() to return identical IPv6Addr except mask, received %+q", idx, test.z00_input, h.String()) + } + + if c := cap(*ipv6.NetIP()); c != sockaddr.IPv6len { + t.Errorf("[%d] Expected new IPv6Addr's Address capacity to be %d bytes, received %d", idx, sockaddr.IPv6len, c) + } + + if l := len(*ipv6.NetIP()); l != sockaddr.IPv6len { + t.Errorf("[%d] Expected new IPv6Addr's Address length to be %d bytes, received %d", idx, sockaddr.IPv6len, l) + } + + if s := ipv6.AddressHexString(); s != test.z01_addrHexStr { + t.Errorf("[%d] Expected address %+q's hexadecimal representation to be %+q, received %+q", idx, test.z00_input, test.z01_addrHexStr, s) + } + + if s := ipv6.AddressBinString(); s != test.z02_addrBinStr { + t.Errorf("[%d] Expected address %+q's binary representation to be %+q, received %+q", idx, test.z00_input, test.z02_addrBinStr, s) + } + + if s := ipv6.String(); s != test.z03_addrStr { + t.Errorf("[%d] Expected %+q's String to be %+q, received %+q", idx, test.z00_input, test.z03_addrStr, s) + } + + if s := ipv6.NetIP().String(); s != test.z04_NetIPStringOut { + t.Errorf("[%d] Expected %+q's address to be %+q, received %+q", idx, test.z00_input, test.z04_NetIPStringOut, s) + } + + if hAddressBigInt.Cmp(test.z05_addrInt) != 0 { + t.Errorf("[%d] Expected %+q's Address to return %+v, received %+v", idx, test.z00_input, test.z05_addrInt, hAddressBigInt) + } + + n, ok := ipv6.Network().(sockaddr.IPv6Addr) + if !ok { + t.Errorf("[%d] Unable to type assert +%q's Network to IPv6Addr", idx, test.z00_input) + } + + nAddressBigInt := big.Int(*n.Address) + if nAddressBigInt.Cmp(test.z06_netInt) != 0 { + t.Errorf("[%d] Expected %+q's Network to return %+v, received %+v", idx, test.z00_input, test.z06_netInt, n.Address) + } + + if m := ipv6.NetIPMask().String(); m != test.z07_ipMaskStr { + t.Errorf("[%d] Expected %+q's mask to be %+q, received %+q", idx, test.z00_input, test.z07_ipMaskStr, m) + } + 
+ if m := ipv6.Maskbits(); m != test.z08_maskbits { + t.Errorf("[%d] Expected %+q's Maskbits() to be %+v, received %+v", idx, test.z00_input, test.z08_maskbits, m) + } + + if n := ipv6.NetIPNet().String(); n != test.z09_NetIPNetStringOut { + t.Errorf("[%d] Expected %+q's network to be %+q, received %+q", idx, test.z00_input, test.z09_NetIPNetStringOut, n) + } + + ipv6MaskBigInt := big.Int(*ipv6.Mask) + if ipv6MaskBigInt.Cmp(test.z10_maskInt) != 0 { + t.Errorf("[%d] Expected %+q's Mask to return %+v, received %+v", idx, test.z00_input, test.z10_maskInt, ipv6MaskBigInt) + } + + nMaskBigInt := big.Int(*n.Mask) + if nMaskBigInt.Cmp(test.z10_maskInt) != 0 { + t.Errorf("[%d] Expected %+q's Network's Mask to return %+v, received %+v", idx, test.z00_input, test.z10_maskInt, nMaskBigInt) + } + + // Network()'s mask must match the IPv6Addr's Mask + if n := ipv6.Network().String(); n != test.z11_networkStr { + t.Errorf("[%d] Expected %+q's Network() to be %+q, received %+q", idx, test.z00_input, test.z11_networkStr, n) + } + + if o := ipv6.Octets(); len(o) != 16 || cap(o) != 16 || + o[0] != test.z12_octets[0] || o[1] != test.z12_octets[1] || + o[2] != test.z12_octets[2] || o[3] != test.z12_octets[3] || + o[4] != test.z12_octets[4] || o[5] != test.z12_octets[5] || + o[6] != test.z12_octets[6] || o[7] != test.z12_octets[7] || + o[8] != test.z12_octets[8] || o[9] != test.z12_octets[9] || + o[10] != test.z12_octets[10] || o[11] != test.z12_octets[11] || + o[12] != test.z12_octets[12] || o[13] != test.z12_octets[13] || + o[14] != test.z12_octets[14] || o[15] != test.z12_octets[15] { + t.Errorf("[%d] Expected %+q's Octets to be %x, received %x", idx, test.z00_input, test.z12_octets, o) + } + + if f := ipv6.FirstUsable().String(); f != test.z13_firstUsable { + t.Errorf("[%d] Expected %+q's FirstUsable() to be %+q, received %+q", idx, test.z00_input, test.z13_firstUsable, f) + } + + if l := ipv6.LastUsable().String(); l != test.z14_lastUsable { + t.Errorf("[%d] Expected %+q's LastUsable() to be %+q, received %+q", idx, test.z00_input, test.z14_lastUsable, l) + } + + if p := ipv6.IPPort(); sockaddr.IPPort(p) != test.z16_portInt { + t.Errorf("[%d] Expected %+q's port to be %+v, received %+v", idx, test.z00_input, test.z16_portInt, p) + } + + if dialNet, dialArgs := ipv6.DialPacketArgs(); dialNet != test.z17_DialPacketArgs[0] || dialArgs != test.z17_DialPacketArgs[1] { + t.Errorf("[%d] Expected %+q's DialPacketArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z17_DialPacketArgs, dialNet, dialArgs) + } + + if dialNet, dialArgs := ipv6.DialStreamArgs(); dialNet != test.z18_DialStreamArgs[0] || dialArgs != test.z18_DialStreamArgs[1] { + t.Errorf("[%d] Expected %+q's DialStreamArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z18_DialStreamArgs, dialNet, dialArgs) + } + + if listenNet, listenArgs := ipv6.ListenPacketArgs(); listenNet != test.z19_ListenPacketArgs[0] || listenArgs != test.z19_ListenPacketArgs[1] { + t.Errorf("[%d] Expected %+q's ListenPacketArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z19_ListenPacketArgs, listenNet, listenArgs) + } + + if listenNet, listenArgs := ipv6.ListenStreamArgs(); listenNet != test.z20_ListenStreamArgs[0] || listenArgs != test.z20_ListenStreamArgs[1] { + t.Errorf("[%d] Expected %+q's ListenStreamArgs() to be %+q, received %+q, %+q", idx, test.z00_input, test.z20_ListenStreamArgs, listenNet, listenArgs) + } + }) + } +} + +func TestSockAddr_IPv6Addr_CmpAddress(t *testing.T) { + tests := []struct
{ + a string + b string + cmp int + }{ + { // 0 + a: "2001:4860:0:2001::68/128", + b: "2001:4860:0:2001::68", + cmp: 0, + }, + { // 1 + a: "2607:f0d0:1002:0051:0000:0000:0000:0004/128", + b: "2607:f0d0:1002:0051:0000:0000:0000:0004", + cmp: 0, + }, + { // 2 + a: "2607:f0d0:1002:0051:0000:0000:0000:0004/128", + b: "2607:f0d0:1002:0051:0000:0000:0000:0004/64", + cmp: 0, + }, + { // 3 + a: "2607:f0d0:1002:0051:0000:0000:0000:0004", + b: "2607:f0d0:1002:0051:0000:0000:0000:0005", + cmp: -1, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv6a, err := sockaddr.NewIPv6Addr(test.a) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv6Addr from %+q: %v", idx, test.a, err) + } + + ipv6b, err := sockaddr.NewIPv6Addr(test.b) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv6Addr from %+q: %v", idx, test.b, err) + } + + if x := ipv6a.CmpAddress(ipv6b); x != test.cmp { + t.Errorf("[%d] IPv6Addr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipv6a, ipv6b, test.cmp, x) + } + + if x := ipv6b.CmpAddress(ipv6a); x*-1 != test.cmp { + t.Errorf("[%d] IPv6Addr.CmpAddress() failed with %+q with %+q (expected %d, received %d)", idx, ipv6a, ipv6b, test.cmp, x) + } + }) + } +} + +func TestSockAddr_IPv6Addr_ContainsAddress(t *testing.T) { + tests := []struct { + name string + input sockaddr.IPv6Addr + cases []sockaddr.IPv6Addr + fail bool + }{ + { + name: "basic", + input: sockaddr.MustIPv6Addr("::1/128"), + cases: []sockaddr.IPv6Addr{ + sockaddr.MustIPv6Addr("::1"), + sockaddr.MustIPv6Addr("[::1/128]"), + }, + }, + { + name: "fail", + input: sockaddr.MustIPv6Addr("::1/128"), + cases: []sockaddr.IPv6Addr{ + sockaddr.MustIPv6Addr("100::"), + }, + fail: true, + }, + { + name: "fail2", + input: sockaddr.MustIPv6Addr("100::/128"), + cases: []sockaddr.IPv6Addr{ + sockaddr.MustIPv6Addr("::1"), + }, + fail: true, + }, + } + + for idx, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", idx) + } + + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv6 := test.input + + for _, tc := range test.cases { + if ipv6.ContainsAddress(tc.Address) == test.fail { + t.Errorf("%s: Expected %q.ContainsAddress(%q)==%t", test.name, ipv6, tc, test.fail) + } + } + }) + } +} + +func TestSockAddr_IPv6Addr_ContainsNetwork(t *testing.T) { + tests := []struct { + input string + pass []string + fail []string + }{ + { // 0 + input: "::1/128", + pass: []string{ + "::1", + "[::1/128]", + }, + fail: []string{ + "100::", + }, + }, + } + + for idx, test := range tests { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + ipv6, err := sockaddr.NewIPv6Addr(test.input) + if err != nil { + t.Fatalf("[%d] Unable to create an IPv6Addr from %+q: %v", idx, test.input, err) + } + + for passIdx, passInput := range test.pass { + passAddr, err := sockaddr.NewIPv6Addr(passInput) + if err != nil { + t.Fatalf("[%d/%d] Unable to create an IPv6Addr from %+q: %v", idx, passIdx, passInput, err) + } + + if !passAddr.ContainsNetwork(ipv6) { + t.Errorf("[%d/%d] Expected %+q to contain %+q", idx, passIdx, test.input, passInput) + } + } + + for failIdx, failInput := range test.fail { + failAddr, err := sockaddr.NewIPv6Addr(failInput) + if err != nil { + t.Fatalf("[%d/%d] Unable to create an IPv6Addr from %+q: %v", idx, failIdx, failInput, err) + } + + if failAddr.ContainsNetwork(ipv6) { + t.Errorf("[%d/%d] Expected %+q to contain %+q", idx, failIdx, test.input, failInput) + } + } + }) + } +} + +func TestSockAddr_IPv6Addr_Equal(t *testing.T) { + tests 
:= []struct { + name string + input sockaddr.IPv6Addr + cases sockaddr.SockAddrs + fail bool + }{ + { + name: "addr equal", + input: sockaddr.MustIPv6Addr("2001:4860:0:2001::68/128"), + cases: sockaddr.SockAddrs{ + sockaddr.MustIPv6Addr("2001:4860:0:2001::68"), + sockaddr.MustIPv6Addr("2001:4860:0:2001::68/128"), + sockaddr.MustIPv6Addr("[2001:4860:0:2001::68]:0"), + }, + }, + { + name: "IPv6Addr not equal", + input: sockaddr.MustIPv6Addr("2001:4860:0:2001::68/128"), + cases: sockaddr.SockAddrs{ + sockaddr.MustIPv6Addr("2001:DB8::/48"), + sockaddr.MustIPv6Addr("2001:4860:0:2001::67/128"), + sockaddr.MustIPv6Addr("2001:4860:0:2001::67"), + sockaddr.MustIPv6Addr("[2001:4860:0:2001::68]:80"), + sockaddr.MustIPv4Addr("1.2.3.4"), + sockaddr.MustUnixSock("/tmp/foo"), + }, + fail: true, + }, + { + name: "equal CIDR", + input: sockaddr.MustIPv6Addr("2001:4860:0:2001::68/64"), + cases: sockaddr.SockAddrs{ + sockaddr.MustIPv6Addr("2001:4860:0:2001::68/64"), + }, + }, + { + name: "not equal CIDR", + input: sockaddr.MustIPv6Addr("2001:4860:0:2001::68/64"), + cases: sockaddr.SockAddrs{ + sockaddr.MustIPv6Addr("2001:DB8::/48"), + sockaddr.MustIPv6Addr("2001:4860:0:2001::67/128"), + sockaddr.MustIPv6Addr("2001:4860:0:2001::67"), + sockaddr.MustIPv6Addr("[2001:4860:0:2001::68]:80"), + sockaddr.MustIPv4Addr("1.2.3.4/32"), + sockaddr.MustUnixSock("/tmp/foo"), + }, + fail: true, + }, + } + + for idx, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", idx) + } + + t.Run(test.name, func(t *testing.T) { + ipv6 := test.input + for _, tc := range test.cases { + if ipv6.Equal(tc) == test.fail { + t.Errorf("%s: Expected %s Equal(%q)=%t", test.name, ipv6, tc, test.fail) + } + } + }) + } +} + +func TestIPv6Addr_CmpRFC(t *testing.T) { + tests := []struct { + name string + recv sockaddr.SockAddr + arg sockaddr.SockAddr + rfcNum uint + want int + }{ + { + name: "simple in RFC", + recv: sockaddr.MustIPv6Addr("::1"), + arg: sockaddr.MustIPv6Addr("100::"), + rfcNum: 6590, + }, + { + name: "ipv6 cmp IPv4", + recv: sockaddr.MustIPv6Addr("2002:c058:6301::/120"), + arg: sockaddr.MustIPv4Addr("192.88.99.0/24"), + rfcNum: 3068, + want: -1, + }, + { + name: "ipv6 cmp IPv4", + recv: sockaddr.MustIPv6Addr("::1"), + arg: sockaddr.MustIPv4Addr("1.2.3.4"), + rfcNum: 6590, + }, + { + name: "ipv6 cmp IPv4", + recv: sockaddr.MustIPv6Addr("::1"), + arg: sockaddr.MustIPv4Addr("192.168.1.1"), + rfcNum: 1918, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + t.Run(test.name, func(t *testing.T) { + if cmp := test.recv.CmpRFC(test.rfcNum, test.arg); cmp != test.want { + t.Fatalf("%s: want %d got %d", test.name, test.want, cmp) + } + }) + } +} + +func TestIPv6Attrs(t *testing.T) { + const expectedNumAttrs = 2 + attrs := sockaddr.IPv6Attrs() + if len(attrs) != expectedNumAttrs { + t.Fatalf("wrong number of IPv6Attrs: %d vs %d", len(attrs), expectedNumAttrs) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/rfc_test.go b/vendor/github.com/hashicorp/go-sockaddr/rfc_test.go new file mode 100644 index 0000000000..af669a43f3 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/rfc_test.go @@ -0,0 +1,63 @@ +package sockaddr_test + +import ( + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func TestVisitAllRFCs(t *testing.T) { + const expectedNumRFCs = 28 + numRFCs := 0 + sockaddr.VisitAllRFCs(func(rfcNum uint, sas sockaddr.SockAddrs) { + numRFCs++ + }) + if numRFCs != expectedNumRFCs { + t.Fatalf("wrong number of RFCs: %d", 
numRFCs) + } +} + +func TestIsRFC(t *testing.T) { + tests := []struct { + name string + sa sockaddr.SockAddr + rfcNum uint + result bool + }{ + { + name: "rfc1918 pass", + sa: sockaddr.MustIPv4Addr("192.168.0.0/16"), + rfcNum: 1918, + result: true, + }, + { + name: "rfc1918 fail", + sa: sockaddr.MustIPv4Addr("1.2.3.4"), + rfcNum: 1918, + result: false, + }, + { + name: "rfc1918 pass", + sa: sockaddr.MustIPv4Addr("192.168.1.1"), + rfcNum: 1918, + result: true, + }, + { + name: "invalid rfc", + sa: sockaddr.MustIPv4Addr("192.168.0.0/16"), + rfcNum: 999999999999, + result: false, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + result := sockaddr.IsRFC(test.rfcNum, test.sa) + if result != test.result { + t.Fatalf("expected a match") + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/route_info_test.go b/vendor/github.com/hashicorp/go-sockaddr/route_info_test.go new file mode 100644 index 0000000000..1716327e81 --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/route_info_test.go @@ -0,0 +1,196 @@ +package sockaddr + +import "testing" + +func Test_parseBSDDefaultIfName(t *testing.T) { + testCases := []struct { + name string + routeOut string + want string + }{ + { + name: "macOS Sierra 10.12 - Common", + routeOut: ` route to: default +destination: default + mask: default + gateway: 10.23.9.1 + interface: en0 + flags: + recvpipe sendpipe ssthresh rtt,msec rttvar hopcount mtu expire + 0 0 0 0 0 0 1500 0 +`, + want: "en0", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := parseDefaultIfNameFromRoute(tc.routeOut) + if err != nil { + t.Fatalf("unable to parse default interface from route output: %v", err) + } + + if got != tc.want { + t.Errorf("got %s; want %s", got, tc.want) + } + }) + } +} + +func Test_parseLinuxDefaultIfName(t *testing.T) { + testCases := []struct { + name string + routeOut string + want string + }{ + { + name: "Linux Ubuntu 14.04 - Common", + routeOut: `default via 10.1.2.1 dev eth0 +10.1.2.0/24 dev eth0 proto kernel scope link src 10.1.2.5 +`, + want: "eth0", + }, + { + name: "Chromebook - 8743.85.0 (Official Build) stable-channel gandof, Milestone 54", + routeOut: `default via 192.168.1.1 dev wlan0 metric 1 +192.168.1.0/24 dev wlan0 proto kernel scope link src 192.168.1.174 +`, + want: "wlan0", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := parseDefaultIfNameFromIPCmd(tc.routeOut) + if err != nil { + t.Fatalf("unable to parse default interface from route output: %v", err) + } + + if got != tc.want { + t.Errorf("got %+q; want %+q", got, tc.want) + } + }) + } +} + +func Test_parseWindowsDefaultIfName(t *testing.T) { + testCases := []struct { + name string + routeOut string + ipconfigOut string + want string + }{ + { + name: "Windows 10 - Enterprise", + routeOut: `=========================================================================== +Interface List + 10...08 00 27 a2 e9 51 ......Intel(R) PRO/1000 MT Desktop Adapter + 13...08 00 27 35 02 ed ......Intel(R) PRO/1000 MT Desktop Adapter #2 + 1...........................Software Loopback Interface 1 + 5...00 00 00 00 00 00 00 e0 Microsoft ISATAP Adapter + 8...00 00 00 00 00 00 00 e0 Microsoft ISATAP Adapter #3 +=========================================================================== + +IPv4 Route Table +=========================================================================== +Active Routes: +Network Destination Netmask Gateway Interface Metric + 
0.0.0.0 0.0.0.0 10.0.2.2 10.0.2.15 25 + 10.0.2.0 255.255.255.0 On-link 10.0.2.15 281 + 10.0.2.15 255.255.255.255 On-link 10.0.2.15 281 + 10.0.2.255 255.255.255.255 On-link 10.0.2.15 281 + 127.0.0.0 255.0.0.0 On-link 127.0.0.1 331 + 127.0.0.1 255.255.255.255 On-link 127.0.0.1 331 + 127.255.255.255 255.255.255.255 On-link 127.0.0.1 331 + 192.168.56.0 255.255.255.0 On-link 192.168.56.100 281 + 192.168.56.100 255.255.255.255 On-link 192.168.56.100 281 + 192.168.56.255 255.255.255.255 On-link 192.168.56.100 281 + 224.0.0.0 240.0.0.0 On-link 127.0.0.1 331 + 224.0.0.0 240.0.0.0 On-link 192.168.56.100 281 + 224.0.0.0 240.0.0.0 On-link 10.0.2.15 281 + 255.255.255.255 255.255.255.255 On-link 127.0.0.1 331 + 255.255.255.255 255.255.255.255 On-link 192.168.56.100 281 + 255.255.255.255 255.255.255.255 On-link 10.0.2.15 281 +=========================================================================== +Persistent Routes: + None + +IPv6 Route Table +=========================================================================== +Active Routes: + If Metric Network Destination Gateway + 1 331 ::1/128 On-link + 13 281 fe80::/64 On-link + 10 281 fe80::/64 On-link + 13 281 fe80::60cc:155f:77a4:ab99/128 + On-link + 10 281 fe80::cccc:710e:f5bb:3088/128 + On-link + 1 331 ff00::/8 On-link + 13 281 ff00::/8 On-link + 10 281 ff00::/8 On-link +=========================================================================== +Persistent Routes: + None +`, + ipconfigOut: `Windows IP Configuration + + +Ethernet adapter Ethernet: + + Connection-specific DNS Suffix . : host.example.org + Link-local IPv6 Address . . . . . : fe80::cccc:710e:f5bb:3088%10 + IPv4 Address. . . . . . . . . . . : 10.0.2.15 + Subnet Mask . . . . . . . . . . . : 255.255.255.0 + Default Gateway . . . . . . . . . : 10.0.2.2 + +Ethernet adapter Ethernet 2: + + Connection-specific DNS Suffix . : + Link-local IPv6 Address . . . . . : fe80::60cc:155f:77a4:ab99%13 + IPv4 Address. . . . . . . . . . . : 192.168.56.100 + Subnet Mask . . . . . . . . . . . : 255.255.255.0 + Default Gateway . . . . . . . . . : + +Tunnel adapter isatap.host.example.org: + + Media State . . . . . . . . . . . : Media disconnected + Connection-specific DNS Suffix . : + +Tunnel adapter Reusable ISATAP Interface {F3F2E4A5-8823-40E5-87EA-1F6881BACC95}: + + Media State . . . . . . . . . . . : Media disconnected + Connection-specific DNS Suffix . 
: host.example.org +`, want: "Ethernet", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := parseDefaultIfNameWindows(tc.routeOut, tc.ipconfigOut) + if err != nil { + t.Fatalf("unable to parse default interface from route output: %v", err) + } + + if got != tc.want { + t.Errorf("got %s; want %s", got, tc.want) + } + }) + } +} + +func Test_VisitCommands(t *testing.T) { + ri, err := NewRouteInfo() + if err != nil { + t.Fatalf("bad: %v", err) + } + + var count int + ri.VisitCommands(func(name string, cmd []string) { + count++ + }) + if count == 0 { + t.Fatalf("Expected more than 0 items") + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddr_test.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddr_test.go new file mode 100644 index 0000000000..2471beb24f --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddr_test.go @@ -0,0 +1,440 @@ +package sockaddr_test + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/hashicorp/go-sockaddr" +) + +// TODO(sean@): Either extend this test to include IPv6Addr and UnixSock, or +// remove and find a good home to test this functionality elsewhere. + +func TestSockAddr_New(t *testing.T) { + type SockAddrFixture struct { + input string + ResultType string + NetworkAddress string + BroadcastAddress string + IPUint32 sockaddr.IPv4Address + Maskbits int + BinString string + HexString string + FirstUsableAddress string + LastUsableAddress string + } + type SockAddrFixtures []SockAddrFixture + + goodResults := []SockAddrFixture{ + { + input: "0.0.0.0", + ResultType: "ipv4", + NetworkAddress: "0.0.0.0", + BroadcastAddress: "0.0.0.0", + Maskbits: 32, + IPUint32: 0, + BinString: "00000000000000000000000000000000", + HexString: "00000000", + FirstUsableAddress: "0.0.0.0", + LastUsableAddress: "0.0.0.0", + }, + { + input: "0.0.0.0/0", + ResultType: "ipv4", + NetworkAddress: "0.0.0.0", + BroadcastAddress: "255.255.255.255", + Maskbits: 0, + IPUint32: 0, + BinString: "00000000000000000000000000000000", + HexString: "00000000", + FirstUsableAddress: "0.0.0.1", + LastUsableAddress: "255.255.255.254", + }, + { + input: "0.0.0.1", + ResultType: "ipv4", + NetworkAddress: "0.0.0.1", + BroadcastAddress: "0.0.0.1", + Maskbits: 32, + IPUint32: 1, + BinString: "00000000000000000000000000000001", + HexString: "00000001", + FirstUsableAddress: "0.0.0.1", + LastUsableAddress: "0.0.0.1", + }, + { + input: "0.0.0.1/1", + ResultType: "ipv4", + NetworkAddress: "0.0.0.0", + BroadcastAddress: "127.255.255.255", + Maskbits: 1, + IPUint32: 1, + BinString: "00000000000000000000000000000001", + HexString: "00000001", + FirstUsableAddress: "0.0.0.1", + LastUsableAddress: "127.255.255.254", + }, + { + input: "128.0.0.0", + ResultType: "ipv4", + NetworkAddress: "128.0.0.0", + BroadcastAddress: "128.0.0.0", + Maskbits: 32, + IPUint32: 2147483648, + BinString: "10000000000000000000000000000000", + HexString: "80000000", + FirstUsableAddress: "128.0.0.0", + LastUsableAddress: "128.0.0.0", + }, + { + input: "255.255.255.255", + ResultType: "ipv4", + NetworkAddress: "255.255.255.255", + BroadcastAddress: "255.255.255.255", + Maskbits: 32, + IPUint32: 4294967295, + BinString: "11111111111111111111111111111111", + HexString: "ffffffff", + FirstUsableAddress: "255.255.255.255", + LastUsableAddress: "255.255.255.255", + }, + { + input: "1.2.3.4", + ResultType: "ipv4", + NetworkAddress: "1.2.3.4", + BroadcastAddress: "1.2.3.4", + Maskbits: 32, + IPUint32: 16909060, + BinString: "00000001000000100000001100000100", + 
HexString: "01020304", + FirstUsableAddress: "1.2.3.4", + LastUsableAddress: "1.2.3.4", + }, + { + input: "192.168.10.10/16", + ResultType: "ipv4", + NetworkAddress: "192.168.0.0", + BroadcastAddress: "192.168.255.255", + Maskbits: 16, + IPUint32: 3232238090, + BinString: "11000000101010000000101000001010", + HexString: "c0a80a0a", + FirstUsableAddress: "192.168.0.1", + LastUsableAddress: "192.168.255.254", + }, + { + input: "192.168.1.10/24", + ResultType: "ipv4", + NetworkAddress: "192.168.1.0", + BroadcastAddress: "192.168.1.255", + Maskbits: 24, + IPUint32: 3232235786, + BinString: "11000000101010000000000100001010", + HexString: "c0a8010a", + FirstUsableAddress: "192.168.1.1", + LastUsableAddress: "192.168.1.254", + }, + { + input: "192.168.0.1", + ResultType: "ipv4", + NetworkAddress: "192.168.0.1", + BroadcastAddress: "192.168.0.1", + Maskbits: 32, + IPUint32: 3232235521, + BinString: "11000000101010000000000000000001", + HexString: "c0a80001", + FirstUsableAddress: "192.168.0.1", + LastUsableAddress: "192.168.0.1", + }, + { + input: "192.168.0.2/31", + ResultType: "ipv4", + NetworkAddress: "192.168.0.2", + BroadcastAddress: "192.168.0.3", + Maskbits: 31, + IPUint32: 3232235522, + BinString: "11000000101010000000000000000010", + HexString: "c0a80002", + FirstUsableAddress: "192.168.0.2", + LastUsableAddress: "192.168.0.3", + }, + { + input: "240.0.0.0/4", + ResultType: "ipv4", + NetworkAddress: "240.0.0.0", + BroadcastAddress: "255.255.255.255", + Maskbits: 4, + IPUint32: 4026531840, + BinString: "11110000000000000000000000000000", + HexString: "f0000000", + FirstUsableAddress: "240.0.0.1", + LastUsableAddress: "255.255.255.254", + }, + } + + for idx, r := range goodResults { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + var ( + addr sockaddr.IPAddr + str string + ) + + sa, err := sockaddr.NewSockAddr(r.input) + if err != nil { + t.Fatalf("Failed parse %s", r.input) + } + + switch r.ResultType { + case "ipv4": + ipv4b, err := sockaddr.NewIPv4Addr(r.input) + if err != nil { + t.Fatalf("[%d] Unable to construct a new IPv4 from %s: %s", idx, r.input, err) + } + if !ipv4b.Equal(sa) { + t.Fatalf("[%d] Equality comparison failed on fresh IPv4", idx) + } + + type_ := sa.Type() + if type_ != sockaddr.TypeIPv4 { + t.Fatalf("[%d] Type mismatch for %s: %d", idx, r.input, type_) + } + + ipv4 := sockaddr.ToIPv4Addr(sa) + if ipv4 == nil { + t.Fatalf("[%d] Failed ToIPv4Addr() %s", idx, r.input) + } + + addr = ipv4.Broadcast() + if addr == nil || addr.NetIP().To4().String() != r.BroadcastAddress { + t.Fatalf("Failed IPv4Addr.BroadcastAddress() %s: expected %+q, received %+q", r.input, r.BroadcastAddress, addr.NetIP().To4().String()) + } + + maskbits := ipv4.Maskbits() + if maskbits != r.Maskbits { + t.Fatalf("Failed Maskbits %s: %d != %d", r.input, maskbits, r.Maskbits) + } + + if ipv4.Address != r.IPUint32 { + t.Fatalf("Failed ToUint32() %s: %d != %d", r.input, ipv4.Address, r.IPUint32) + } + + str = ipv4.AddressBinString() + if str != r.BinString { + t.Fatalf("Failed BinString %s: %s != %s", r.input, str, r.BinString) + } + + str = ipv4.AddressHexString() + if str != r.HexString { + t.Fatalf("Failed HexString %s: %s != %s", r.input, str, r.HexString) + } + + addr = ipv4.Network() + if addr == nil || addr.NetIP().To4().String() != r.NetworkAddress { + t.Fatalf("Failed NetworkAddress %s: %s != %s", r.input, addr.NetIP().To4().String(), r.NetworkAddress) + } + + addr = ipv4.FirstUsable() + if addr == nil || addr.NetIP().To4().String() != r.FirstUsableAddress { + t.Fatalf("Failed 
FirstUsableAddress %s: %s != %s", r.input, addr.NetIP().To4().String(), r.FirstUsableAddress) + } + + addr = ipv4.LastUsable() + if addr == nil || addr.NetIP().To4().String() != r.LastUsableAddress { + t.Fatalf("Failed LastUsableAddress %s: %s != %s", r.input, addr.NetIP().To4().String(), r.LastUsableAddress) + } + default: + t.Fatalf("Unknown result type: %s", r.ResultType) + } + }) + } + + badResults := []string{ + "256.0.0.0", + "0.0.0.0.0", + } + + for idx, badIP := range badResults { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + sa, err := sockaddr.NewSockAddr(badIP) + if err == nil { + t.Fatalf("Should have failed to parse %s: %v", badIP, sa) + } + if sa != nil { + t.Fatalf("SockAddr should be nil") + } + }) + } + +} + +func TestSockAddrAttrs(t *testing.T) { + const expectedNumAttrs = 2 + saa := sockaddr.SockAddrAttrs() + if len(saa) != expectedNumAttrs { + t.Fatalf("wrong number of SockAddrAttrs: %d vs %d", len(saa), expectedNumAttrs) + } + + tests := []struct { + name string + sa sockaddr.SockAddr + attr sockaddr.AttrName + want string + }{ + { + name: "type", + sa: sockaddr.MustIPv4Addr("1.2.3.4"), + attr: "type", + want: "IPv4", + }, + { + name: "string", + sa: sockaddr.MustIPv4Addr("1.2.3.4"), + attr: "string", + want: "1.2.3.4", + }, + { + name: "invalid", + sa: sockaddr.MustIPv4Addr("1.2.3.4"), + attr: "ENOENT", + want: "", + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + result := sockaddr.SockAddrAttr(test.sa, test.attr) + if result != test.want { + t.Fatalf("%s: expected %s got %s", test.name, test.want, result) + } + } +} + +func TestToFoo(t *testing.T) { + tests := []struct { + name string + sa sockaddr.SockAddr + passIP bool + passIPv4 bool + passIPv6 bool + passUnix bool + }{ + { + name: "ipv4", + sa: sockaddr.MustIPv4Addr("1.2.3.4"), + passIP: true, + passIPv4: true, + }, + { + name: "ipv6", + sa: sockaddr.MustIPv6Addr("::1"), + passIP: true, + passIPv6: true, + }, + { + name: "unix", + sa: sockaddr.MustUnixSock("/tmp/foo"), + passUnix: true, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d must have a name", i) + } + + switch us := sockaddr.ToUnixSock(test.sa); { + case us == nil && test.passUnix, + us != nil && !test.passUnix: + t.Fatalf("%s: ToUnixSock() mismatch", test.name) + } + + switch ip := sockaddr.ToIPAddr(test.sa); { + case ip == nil && test.passIP, + ip != nil && !test.passIP: + t.Fatalf("%s: ToIPAddr() mismatch", test.name) + } + + switch ipv4 := sockaddr.ToIPv4Addr(test.sa); { + case ipv4 == nil && test.passIPv4, + ipv4 != nil && !test.passIPv4: + t.Fatalf("%s: ToIPv4Addr() mismatch", test.name) + } + + switch ipv6 := sockaddr.ToIPv6Addr(test.sa); { + case ipv6 == nil && test.passIPv6, + ipv6 != nil && !test.passIPv6: + t.Fatalf("%s: ToIPv6Addr() mismatch", test.name) + } + } + +} + +func TestSockAddrMarshaler(t *testing.T) { + addr := "192.168.10.24/24" + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + t.Fatal(err) + } + sam := &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + } + marshaled, err := json.Marshal(sam) + if err != nil { + t.Fatal(err) + } + sam2 := &sockaddr.SockAddrMarshaler{} + err = json.Unmarshal(marshaled, sam2) + if err != nil { + t.Fatal(err) + } + if sam.SockAddr.String() != sam2.SockAddr.String() { + t.Fatalf("mismatch after marshaling: %s vs %s", sam.SockAddr.String(), sam2.SockAddr.String()) + } + if sam2.SockAddr.String() != addr { + t.Fatalf("mismatch after marshaling: %s vs %s", addr, sam2.SockAddr.String()) + } +} + +func TestSockAddrMultiMarshaler(t *testing.T) { + addr := "192.168.10.24/24" + type d struct { + Addr 
*sockaddr.SockAddrMarshaler + Addrs []*sockaddr.SockAddrMarshaler + } + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + t.Fatal(err) + } + myD := &d{ + Addr: &sockaddr.SockAddrMarshaler{SockAddr: sa}, + Addrs: []*sockaddr.SockAddrMarshaler{ + &sockaddr.SockAddrMarshaler{SockAddr: sa}, + &sockaddr.SockAddrMarshaler{SockAddr: sa}, + &sockaddr.SockAddrMarshaler{SockAddr: sa}, + }, + } + marshaled, err := json.Marshal(myD) + if err != nil { + t.Fatal(err) + } + var myD2 d + err = json.Unmarshal(marshaled, &myD2) + if err != nil { + t.Fatal(err) + } + if myD.Addr.String() != myD2.Addr.String() { + t.Fatalf("mismatch after marshaling: %s vs %s", myD.Addr.String(), myD2.Addr.String()) + } + if len(myD.Addrs) != len(myD2.Addrs) { + t.Fatalf("mismatch after marshaling: %d vs %d", len(myD.Addrs), len(myD2.Addrs)) + } + for i, v := range myD.Addrs { + if v.String() != myD2.Addrs[i].String() { + t.Fatalf("mismatch after marshaling: %s vs %s", v.String(), myD2.Addrs[i].String()) + } + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/sockaddrs_test.go b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs_test.go new file mode 100644 index 0000000000..118cf5b1ea --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/sockaddrs_test.go @@ -0,0 +1,338 @@ +package sockaddr_test + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/go-sockaddr" +) + +func init() { + lib.SeedMathRand() +} + +// NOTE: A number of these code paths are exercised in template/ and +// cmd/sockaddr/ + +// sockAddrStringInputs allows for easy test creation by developers. +// Parallel arrays of string inputs are converted to their SockAddr +// equivalents for use by unit tests. +type sockAddrStringInputs []struct { + inputAddrs []string + sortedAddrs []string + sortedTypes []sockaddr.SockAddrType + sortFuncs []sockaddr.CmpAddrFunc + numIPv4Inputs int + numIPv6Inputs int + numUnixInputs int +} + +func convertToSockAddrs(t *testing.T, inputs []string) sockaddr.SockAddrs { + sockAddrs := make(sockaddr.SockAddrs, 0, len(inputs)) + for i, input := range inputs { + sa, err := sockaddr.NewSockAddr(input) + if err != nil { + t.Fatalf("[%d] Invalid SockAddr input for %+q: %v", i, input, err) + } + sockAddrs = append(sockAddrs, sa) + } + + return sockAddrs +} + +// shuffleStrings randomly shuffles the list of strings +func shuffleStrings(list []string) { + for i := range list { + j := rand.Intn(i + 1) + list[i], list[j] = list[j], list[i] + } +} + +func TestSockAddr_SockAddrs_AscAddress(t *testing.T) { + testInputs := sockAddrStringInputs{ + { // testNum: 0 + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscAddress, + }, + numIPv4Inputs: 9, + numIPv6Inputs: 1, + numUnixInputs: 0, + inputAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "128.95.120.2:53", + "128.95.120.2/32", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "128.95.120.2:8600", + "240.0.0.1/4", + "::", + }, + sortedAddrs: []string{ + "10.0.0.0/8", + "128.95.120.1/32", + "128.95.120.2:53", + "128.95.120.2/32", + "128.95.120.2:8600", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.1.10/24", + "240.0.0.1/4", + "::", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + shuffleStrings(test.inputAddrs) + inputSockAddrs := convertToSockAddrs(t, test.inputAddrs) + sas := convertToSockAddrs(t, test.sortedAddrs) + sortedIPv4Addrs, nonIPv4Addrs := sas.FilterByType(sockaddr.TypeIPv4) + if l := len(sortedIPv4Addrs); l != test.numIPv4Inputs { 
+ t.Fatal("[%d] Missing IPv4Addrs: expected %d, received %d", idx, test.numIPv4Inputs, l) + } + if len(nonIPv4Addrs) != test.numIPv6Inputs+test.numUnixInputs { + t.Fatal("[%d] Non-IPv4 Address in input", idx) + } + + // Copy inputAddrs so we can manipulate it. wtb const. + sockAddrs := append(sockaddr.SockAddrs(nil), inputSockAddrs...) + filteredAddrs, _ := sockAddrs.FilterByType(sockaddr.TypeIPv4) + sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(filteredAddrs) + ipv4SockAddrs, nonIPv4s := filteredAddrs.FilterByType(sockaddr.TypeIPv4) + if len(nonIPv4s) != 0 { + t.Fatalf("[%d] bad", idx) + } + + for i, ipv4SockAddr := range ipv4SockAddrs { + ipv4Addr := sockaddr.ToIPv4Addr(ipv4SockAddr) + sortedIPv4Addr := sockaddr.ToIPv4Addr(sortedIPv4Addrs[i]) + if ipv4Addr.Address != sortedIPv4Addr.Address { + t.Errorf("[%d/%d] Sort equality failed: expected %s, received %s", idx, i, sortedIPv4Addrs[i], ipv4Addr) + } + } + }) + } +} + +func TestSockAddr_SockAddrs_AscPrivate(t *testing.T) { + testInputs := []struct { + sortFuncs []sockaddr.CmpAddrFunc + inputAddrs []string + sortedAddrs []string + }{ + { // testNum: 0 + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscType, + sockaddr.AscPrivate, + sockaddr.AscAddress, + sockaddr.AscType, + sockaddr.AscAddress, + sockaddr.AscPort, + }, + inputAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.0.0/16", + "192.168.1.10/24", + "128.95.120.1/32", + "128.95.120.2/32", + "128.95.120.2:53", + "128.95.120.2:8600", + "240.0.0.1/4", + "::", + }, + sortedAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "192.168.0.0/16", + "192.168.0.0/16", + "192.168.1.10/24", + "240.0.0.1/4", + "128.95.120.1/32", + "128.95.120.2/32", + // "128.95.120.2:53", + // "128.95.120.2:8600", + // "::", + }, + }, + { + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscType, + sockaddr.AscPrivate, + sockaddr.AscAddress, + }, + inputAddrs: []string{ + "1.2.3.4:53", + "192.168.1.2", + "/tmp/foo", + "[cc::1]:8600", + "[::1]:53", + }, + sortedAddrs: []string{ + "/tmp/foo", + "192.168.1.2", + "1.2.3.4:53", + "[::1]:53", + "[cc::1]:8600", + }, + }, + { + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscType, + sockaddr.AscPrivate, + sockaddr.AscAddress, + }, + inputAddrs: []string{ + "/tmp/foo", + "/tmp/bar", + "1.2.3.4", + }, + sortedAddrs: []string{ + "/tmp/bar", + "/tmp/foo", + "1.2.3.4", + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + + inputAddrs := append([]string(nil), test.inputAddrs...) 
+ shuffleStrings(inputAddrs) + inputSockAddrs := convertToSockAddrs(t, inputAddrs) + + sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs) + + for i, sockAddr := range sortedAddrs { + if !sockAddr.Equal(inputSockAddrs[i]) { + t.Logf("Input Addrs:\t%+v", inputAddrs) + t.Logf("Sorted Addrs:\t%+v", inputSockAddrs) + t.Logf("Expected Addrs:\t%+v", test.sortedAddrs) + t.Fatalf("[%d/%d] Sort AscPrivate failed: expected %+q, received %+q", idx, i, sockAddr, inputSockAddrs[i]) + } + } + }) + } +} + +func TestSockAddr_SockAddrs_AscPort(t *testing.T) { + testInputs := []struct { + name string + sortFuncs []sockaddr.CmpAddrFunc + inputAddrs []string + sortedAddrs []string + }{ + { + name: "simple port test", + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscPort, + sockaddr.AscType, + }, + inputAddrs: []string{ + "1.2.3.4:53", + "/tmp/foo", + "[::1]:53", + }, + sortedAddrs: []string{ + "/tmp/foo", + "1.2.3.4:53", + "[::1]:53", + }, + }, + { + name: "simple port test", + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscPort, + sockaddr.AscType, + }, + inputAddrs: []string{ + "1.2.3.4:53", + "/tmp/foo", + }, + sortedAddrs: []string{ + "/tmp/foo", + "1.2.3.4:53", + }, + }, + } + + for idx, test := range testInputs { + t.Run(test.name, func(t *testing.T) { + sortedAddrs := convertToSockAddrs(t, test.sortedAddrs) + + inputAddrs := append([]string(nil), test.inputAddrs...) + shuffleStrings(inputAddrs) + inputSockAddrs := convertToSockAddrs(t, inputAddrs) + + sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs) + + for i, sockAddr := range sortedAddrs { + if !sockAddr.Equal(inputSockAddrs[i]) { + t.Logf("Input Addrs:\t%+v", inputAddrs) + t.Logf("Sorted Addrs:\t%+v", inputSockAddrs) + t.Logf("Expected Addrs:\t%+v", test.sortedAddrs) + t.Fatalf("[%d/%d] Sort AscPort/AscType failed: expected %+q, received %+q", idx, i, sockAddr, inputSockAddrs[i]) + } + } + }) + } +} + +func TestSockAddr_SockAddrs_AscType(t *testing.T) { + testInputs := sockAddrStringInputs{ + { // testNum: 0 + sortFuncs: []sockaddr.CmpAddrFunc{ + sockaddr.AscType, + }, + inputAddrs: []string{ + "10.0.0.0/8", + "172.16.1.3/12", + "128.95.120.2:53", + "::", + "128.95.120.2/32", + "192.168.0.0/16", + "128.95.120.1/32", + "192.168.1.10/24", + "128.95.120.2:8600", + "240.0.0.1/4", + }, + sortedTypes: []sockaddr.SockAddrType{ + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv4, + sockaddr.TypeIPv6, + }, + }, + } + + for idx, test := range testInputs { + t.Run(fmt.Sprintf("%d", idx), func(t *testing.T) { + shuffleStrings(test.inputAddrs) + + inputSockAddrs := convertToSockAddrs(t, test.inputAddrs) + + sockaddr.OrderedAddrBy(test.sortFuncs...).Sort(inputSockAddrs) + + for i, sockAddr := range inputSockAddrs { + if sockAddr.Type() != test.sortedTypes[i] { + t.Errorf("[%d/%d] Sort AscType failed: expected type %d, received %+q", idx, i, test.sortedTypes[i], sockAddr) + } + } + }) + } +} diff --git a/vendor/github.com/hashicorp/go-sockaddr/unixsock_test.go b/vendor/github.com/hashicorp/go-sockaddr/unixsock_test.go new file mode 100644 index 0000000000..7ed636a29e --- /dev/null +++ b/vendor/github.com/hashicorp/go-sockaddr/unixsock_test.go @@ -0,0 +1,108 @@ +package sockaddr_test + +import ( + "testing" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +func TestUnixSock_impl_SockAddr(t *testing.T) { + tests := []struct { + name string
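+ // input is the UnixSock under test; each *Args field holds the expected (network, address) pair returned by the corresponding Dial/Listen helper.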
+ input sockaddr.UnixSock + dialPacketArgs []string + dialStreamArgs []string + listenPacketArgs []string + listenStreamArgs []string + }{ + { + name: "simple", + input: sockaddr.MustUnixSock("/tmp/foo"), + dialPacketArgs: []string{"unixgram", "/tmp/foo"}, + dialStreamArgs: []string{"unix", "/tmp/foo"}, + listenPacketArgs: []string{"unixgram", "/tmp/foo"}, + listenStreamArgs: []string{"unix", "/tmp/foo"}, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + arg1, arg2 := test.input.DialPacketArgs() + if arg1 != test.dialPacketArgs[0] || arg2 != test.dialPacketArgs[1] { + t.Fatalf("%s: %q %q", test.name, arg1, arg2) + } + + arg1, arg2 = test.input.DialStreamArgs() + if arg1 != test.dialStreamArgs[0] || arg2 != test.dialStreamArgs[1] { + t.Fatalf("%s: %q %q", test.name, arg1, arg2) + } + + arg1, arg2 = test.input.ListenPacketArgs() + if arg1 != test.listenPacketArgs[0] || arg2 != test.listenPacketArgs[1] { + t.Fatalf("%s: %q %q", test.name, arg1, arg2) + } + + arg1, arg2 = test.input.ListenStreamArgs() + if arg1 != test.listenStreamArgs[0] || arg2 != test.listenStreamArgs[1] { + t.Fatalf("%s: %q %q", test.name, arg1, arg2) + } + } +} + +func TestUnixSock_Equal(t *testing.T) { + tests := []struct { + name string + input sockaddr.UnixSock + sa sockaddr.SockAddr + equal bool + }{ + { + name: "equal", + input: sockaddr.MustUnixSock("/tmp/foo"), + sa: sockaddr.MustUnixSock("/tmp/foo"), + equal: true, + }, + { + name: "not equal", + input: sockaddr.MustUnixSock("/tmp/foo"), + sa: sockaddr.MustUnixSock("/tmp/bar"), + equal: false, + }, + { + name: "ipv4", + input: sockaddr.MustUnixSock("/tmp/foo"), + sa: sockaddr.MustIPv4Addr("1.2.3.4"), + equal: false, + }, + { + name: "ipv6", + input: sockaddr.MustUnixSock("/tmp/foo"), + sa: sockaddr.MustIPv6Addr("::1"), + equal: false, + }, + } + + for i, test := range tests { + if test.name == "" { + t.Fatalf("test %d needs a name", i) + } + + t.Run(test.name, func(t *testing.T) { + us := test.input + if ret := us.Equal(test.sa); ret != test.equal { + t.Fatalf("%s: equal: %v %q %q", test.name, ret, us, test.sa) + } + }) + } +} + +func TestUnixSockAttrs(t *testing.T) { + const expectedNumAttrs = 1 + usa := sockaddr.UnixSockAttrs() + if len(usa) != expectedNumAttrs { + t.Fatalf("wrong number of UnixSockAttrs: %d vs %d", len(usa), expectedNumAttrs) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/.gitignore b/vendor/github.com/hashicorp/memberlist/.gitignore new file mode 100644 index 0000000000..9158f171a5 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +.vagrant/ + diff --git a/vendor/github.com/hashicorp/memberlist/awareness_test.go b/vendor/github.com/hashicorp/memberlist/awareness_test.go new file mode 100644 index 0000000000..c6ade10af7 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/awareness_test.go @@ -0,0 +1,41 @@ +package memberlist + +import ( + "testing" + "time" +) + +func TestAwareness(t *testing.T) { + cases := []struct { + delta int + score int + timeout time.Duration + }{ + {0, 0, 1 * time.Second}, + {-1, 0, 1 * time.Second}, + {-10, 0, 1 * time.Second}, + {1, 1, 2 * time.Second}, + {-1, 0, 1 * time.Second}, + {10, 7, 8 * time.Second}, + {-1, 6, 7 
* time.Second}, + {-1, 5, 6 * time.Second}, + {-1, 4, 5 * time.Second}, + {-1, 3, 4 * time.Second}, + {-1, 2, 3 * time.Second}, + {-1, 1, 2 * time.Second}, + {-1, 0, 1 * time.Second}, + {-1, 0, 1 * time.Second}, + } + + a := newAwareness(8) + for i, c := range cases { + a.ApplyDelta(c.delta) + if a.GetHealthScore() != c.score { + t.Errorf("case %d: score mismatch %d != %d", i, a.score, c.score) + } + if timeout := a.ScaleTimeout(1 * time.Second); timeout != c.timeout { + t.Errorf("case %d: scaled timeout mismatch %9.6f != %9.6f", + i, timeout.Seconds(), c.timeout.Seconds()) + } + } +} diff --git a/vendor/github.com/hashicorp/memberlist/broadcast_test.go b/vendor/github.com/hashicorp/memberlist/broadcast_test.go new file mode 100644 index 0000000000..c6a7302ccb --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/broadcast_test.go @@ -0,0 +1,27 @@ +package memberlist + +import ( + "reflect" + "testing" +) + +func TestMemberlistBroadcast_Invalidates(t *testing.T) { + m1 := &memberlistBroadcast{"test", nil, nil} + m2 := &memberlistBroadcast{"foo", nil, nil} + + if m1.Invalidates(m2) || m2.Invalidates(m1) { + t.Fatalf("unexpected invalidation") + } + + if !m1.Invalidates(m1) { + t.Fatalf("expected invalidation") + } +} + +func TestMemberlistBroadcast_Message(t *testing.T) { + m1 := &memberlistBroadcast{"test", []byte("test"), nil} + msg := m1.Message() + if !reflect.DeepEqual(msg, []byte("test")) { + t.Fatalf("messages do not match") + } +} diff --git a/vendor/github.com/hashicorp/memberlist/integ_test.go b/vendor/github.com/hashicorp/memberlist/integ_test.go new file mode 100644 index 0000000000..f519c6baa6 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/integ_test.go @@ -0,0 +1,89 @@ +package memberlist + +import ( + "fmt" + "log" + "os" + "testing" + "time" +) + +// CheckInteg will skip a test if integration testing is not enabled. +func CheckInteg(t *testing.T) { + if !IsInteg() { + t.SkipNow() + } +} + +// IsInteg returns a boolean telling you if we're in integ testing mode. +func IsInteg() bool { + return os.Getenv("INTEG_TESTS") != "" +} + +// Tests the memberlist by creating a cluster of 16 nodes +// and checking that we get strong convergence of changes. 
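+// Integration runs are opt-in: the test is skipped unless the INTEG_TESTS environment variable is set, e.g. INTEG_TESTS=1 go test -run TestMemberlist_Integ (illustrative invocation). +// The join pattern it scales up looks roughly like this two-node sketch, using only the Create/Join/Shutdown API exercised in this file (a hedged sketch with made-up node names, not a drop-in snippet): +// +// c1 := DefaultLANConfig() +// c1.Name, c1.BindAddr = "n1", "127.0.0.1" +// m1, _ := Create(c1) +// defer m1.Shutdown() +// c2 := DefaultLANConfig() +// c2.Name, c2.BindAddr = "n2", "127.0.0.2" +// m2, _ := Create(c2) +// defer m2.Shutdown() +// if n, err := m2.Join([]string{c1.BindAddr}); n != 1 || err != nil { +// // the join failed; inspect err +// }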
+func TestMemberlist_Integ(t *testing.T) { + CheckInteg(t) + + num := 16 + var members []*Memberlist + + secret := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + eventCh := make(chan NodeEvent, num) + + addr := "127.0.0.1" + for i := 0; i < num; i++ { + c := DefaultLANConfig() + c.Name = fmt.Sprintf("%s:%d", addr, 12345+i) + c.BindAddr = addr + c.BindPort = 12345 + i + c.ProbeInterval = 20 * time.Millisecond + c.ProbeTimeout = 100 * time.Millisecond + c.GossipInterval = 20 * time.Millisecond + c.PushPullInterval = 200 * time.Millisecond + c.SecretKey = secret + + if i == 0 { + c.Events = &ChannelEventDelegate{eventCh} + } + + m, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + members = append(members, m) + defer m.Shutdown() + + if i > 0 { + last := members[i-1] + num, err := m.Join([]string{last.config.Name}) + if num == 0 || err != nil { + t.Fatalf("unexpected err: %s", err) + } + } + } + + // Wait and print debug info + breakTimer := time.After(250 * time.Millisecond) +WAIT: + for { + select { + case e := <-eventCh: + if e.Event == NodeJoin { + log.Printf("[DEBUG] Node join: %v (%d)", *e.Node, members[0].NumMembers()) + } else { + log.Printf("[DEBUG] Node leave: %v (%d)", *e.Node, members[0].NumMembers()) + } + case <-breakTimer: + break WAIT + } + } + + for idx, m := range members { + got := m.NumMembers() + if got != num { + t.Errorf("bad num members at idx %d. Expected %d. Got %d.", + idx, num, got) + } + } +} diff --git a/vendor/github.com/hashicorp/memberlist/keyring_test.go b/vendor/github.com/hashicorp/memberlist/keyring_test.go new file mode 100644 index 0000000000..eec699fd00 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/keyring_test.go @@ -0,0 +1,154 @@ +package memberlist + +import ( + "bytes" + "testing" +) + +var TestKeys [][]byte = [][]byte{ + []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + []byte{15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}, + []byte{8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7}, +} + +func TestKeyring_EmptyRing(t *testing.T) { + // Keyrings can be created with no encryption keys (disabled encryption) + keyring, err := NewKeyring(nil, nil) + if err != nil { + t.Fatalf("err: %s", err) + } + + keys := keyring.GetKeys() + if len(keys) != 0 { + t.Fatalf("Expected 0 keys but have %d", len(keys)) + } +} + +func TestKeyring_PrimaryOnly(t *testing.T) { + // Keyrings can be created using only a primary key + keyring, err := NewKeyring(nil, TestKeys[0]) + if err != nil { + t.Fatalf("err: %s", err) + } + + keys := keyring.GetKeys() + if len(keys) != 1 { + t.Fatalf("Expected 1 key but have %d", len(keys)) + } +} + +func TestKeyring_GetPrimaryKey(t *testing.T) { + keyring, err := NewKeyring(TestKeys, TestKeys[1]) + if err != nil { + t.Fatalf("err: %s", err) + } + + // GetPrimaryKey returns correct key + primaryKey := keyring.GetPrimaryKey() + if !bytes.Equal(primaryKey, TestKeys[1]) { + t.Fatalf("Unexpected primary key: %v", primaryKey) + } +} + +func TestKeyring_AddRemoveUse(t *testing.T) { + keyring, err := NewKeyring(nil, TestKeys[1]) + if err != nil { + t.Fatalf("err :%s", err) + } + + // Use non-existent key throws error + if err := keyring.UseKey(TestKeys[2]); err == nil { + t.Fatalf("Expected key not installed error") + } + + // Add key to ring + if err := keyring.AddKey(TestKeys[2]); err != nil { + t.Fatalf("err: %s", err) + } + + keys := keyring.GetKeys() + if !bytes.Equal(keys[0], TestKeys[1]) { + t.Fatalf("Unexpected primary key change") + } + + if len(keys) != 2 { 
+ t.Fatalf("Expected 2 keys but have %d", len(keys)) + } + + // Use key that exists should succeed + if err := keyring.UseKey(TestKeys[2]); err != nil { + t.Fatalf("err: %s", err) + } + + primaryKey := keyring.GetPrimaryKey() + if !bytes.Equal(primaryKey, TestKeys[2]) { + t.Fatalf("Unexpected primary key: %v", primaryKey) + } + + // Removing primary key should fail + if err := keyring.RemoveKey(TestKeys[2]); err == nil { + t.Fatalf("Expected primary key removal error") + } + + // Removing non-primary key should succeed + if err := keyring.RemoveKey(TestKeys[1]); err != nil { + t.Fatalf("err: %s", err) + } + + keys = keyring.GetKeys() + if len(keys) != 1 { + t.Fatalf("Expected 1 key but have %d", len(keys)) + } +} + +func TestKeyRing_MultiKeyEncryptDecrypt(t *testing.T) { + plaintext := []byte("this is a plain text message") + extra := []byte("random data") + + keyring, err := NewKeyring(TestKeys, TestKeys[0]) + if err != nil { + t.Fatalf("err: %s", err) + } + + // First encrypt using the primary key and make sure we can decrypt + var buf bytes.Buffer + err = encryptPayload(1, TestKeys[0], plaintext, extra, &buf) + if err != nil { + t.Fatalf("err: %v", err) + } + + msg, err := decryptPayload(keyring.GetKeys(), buf.Bytes(), extra) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !bytes.Equal(msg, plaintext) { + t.Fatalf("bad: %v", msg) + } + + // Now encrypt with a secondary key and try decrypting again. + buf.Reset() + err = encryptPayload(1, TestKeys[2], plaintext, extra, &buf) + if err != nil { + t.Fatalf("err: %v", err) + } + + msg, err = decryptPayload(keyring.GetKeys(), buf.Bytes(), extra) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !bytes.Equal(msg, plaintext) { + t.Fatalf("bad: %v", msg) + } + + // Remove a key from the ring, and then try decrypting again + if err := keyring.RemoveKey(TestKeys[2]); err != nil { + t.Fatalf("err: %s", err) + } + + msg, err = decryptPayload(keyring.GetKeys(), buf.Bytes(), extra) + if err == nil { + t.Fatalf("Expected no keys to decrypt message") + } +} diff --git a/vendor/github.com/hashicorp/memberlist/logging_test.go b/vendor/github.com/hashicorp/memberlist/logging_test.go new file mode 100644 index 0000000000..cc04b8a914 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/logging_test.go @@ -0,0 +1,47 @@ +package memberlist + +import ( + "fmt" + "net" + "testing" +) + +func TestLogging_Address(t *testing.T) { + s := LogAddress(nil) + if s != "from=" { + t.Fatalf("bad: %s", s) + } + + addr, err := net.ResolveIPAddr("ip4", "127.0.0.1") + if err != nil { + t.Fatalf("err: %v", err) + } + + s = LogAddress(addr) + if s != "from=127.0.0.1" { + t.Fatalf("bad: %s", s) + } +} + +func TestLogging_Conn(t *testing.T) { + s := LogConn(nil) + if s != "from=" { + t.Fatalf("bad: %s", s) + } + + ln, err := net.Listen("tcp", ":0") + if err != nil { + t.Fatalf("err: %v", err) + } + + conn, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + t.Fatalf("err: %v", err) + } + defer conn.Close() + + s = LogConn(conn) + if s != fmt.Sprintf("from=%s", conn.RemoteAddr().String()) { + t.Fatalf("bad: %s", s) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/memberlist_test.go b/vendor/github.com/hashicorp/memberlist/memberlist_test.go new file mode 100644 index 0000000000..ee2fc5d529 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/memberlist_test.go @@ -0,0 +1,1545 @@ +package memberlist + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "net" + "os" + "reflect" + "strings" + "sync" + "testing" + "time" + + 
"github.com/miekg/dns" +) + +var bindLock sync.Mutex +var bindNum byte = 10 + +func getBindAddr() net.IP { + bindLock.Lock() + defer bindLock.Unlock() + + result := net.IPv4(127, 0, 0, bindNum) + bindNum++ + if bindNum > 255 { + bindNum = 10 + } + + return result +} + +func testConfig() *Config { + config := DefaultLANConfig() + config.BindAddr = getBindAddr().String() + config.Name = config.BindAddr + return config +} + +func yield() { + time.Sleep(5 * time.Millisecond) +} + +type MockDelegate struct { + meta []byte + msgs [][]byte + broadcasts [][]byte + state []byte + remoteState []byte +} + +func (m *MockDelegate) NodeMeta(limit int) []byte { + return m.meta +} + +func (m *MockDelegate) NotifyMsg(msg []byte) { + cp := make([]byte, len(msg)) + copy(cp, msg) + m.msgs = append(m.msgs, cp) +} + +func (m *MockDelegate) GetBroadcasts(overhead, limit int) [][]byte { + b := m.broadcasts + m.broadcasts = nil + return b +} + +func (m *MockDelegate) LocalState(join bool) []byte { + return m.state +} + +func (m *MockDelegate) MergeRemoteState(s []byte, join bool) { + m.remoteState = s +} + +// Returns a new Memberlist on an open port by trying a range of port numbers +// until something sticks. +func NewMemberlistOnOpenPort(c *Config) (*Memberlist, error) { + c.BindPort = 0 + return newMemberlist(c) +} + +func GetMemberlistDelegate(t *testing.T) (*Memberlist, *MockDelegate) { + d := &MockDelegate{} + + c := testConfig() + c.Delegate = d + + m, err := NewMemberlistOnOpenPort(c) + if err != nil { + t.Fatalf("failed to start: %v", err) + return nil, nil + } + + return m, d +} + +func GetMemberlist(t *testing.T) *Memberlist { + c := testConfig() + + m, err := NewMemberlistOnOpenPort(c) + if err != nil { + t.Fatalf("failed to start: %v", err) + return nil + } + + return m +} + +func TestDefaultLANConfig_protocolVersion(t *testing.T) { + c := DefaultLANConfig() + if c.ProtocolVersion != ProtocolVersion2Compatible { + t.Fatalf("should be max: %d", c.ProtocolVersion) + } +} + +func TestCreate_protocolVersion(t *testing.T) { + cases := []struct { + version uint8 + err bool + }{ + {ProtocolVersionMin, false}, + {ProtocolVersionMax, false}, + // TODO(mitchellh): uncommon when we're over 0 + //{ProtocolVersionMin - 1, true}, + {ProtocolVersionMax + 1, true}, + {ProtocolVersionMax - 1, false}, + } + + for _, tc := range cases { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + c.ProtocolVersion = tc.version + m, err := Create(c) + if tc.err && err == nil { + t.Errorf("Should've failed with version: %d", tc.version) + } else if !tc.err && err != nil { + t.Errorf("Version '%d' error: %s", tc.version, err) + } + + if err == nil { + m.Shutdown() + } + } +} + +func TestCreate_secretKey(t *testing.T) { + cases := []struct { + key []byte + err bool + }{ + {make([]byte, 0), false}, + {[]byte("abc"), true}, + {make([]byte, 16), false}, + {make([]byte, 38), true}, + } + + for _, tc := range cases { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + c.SecretKey = tc.key + m, err := Create(c) + if tc.err && err == nil { + t.Errorf("Should've failed with key: %#v", tc.key) + } else if !tc.err && err != nil { + t.Errorf("Key '%#v' error: %s", tc.key, err) + } + + if err == nil { + m.Shutdown() + } + } +} + +func TestCreate_secretKeyEmpty(t *testing.T) { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + c.SecretKey = make([]byte, 0) + m, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + if m.config.EncryptionEnabled() { + 
t.Fatalf("Expected encryption to be disabled") + } +} + +func TestCreate_keyringOnly(t *testing.T) { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + keyring, err := NewKeyring(nil, make([]byte, 16)) + if err != nil { + t.Fatalf("err: %s", err) + } + c.Keyring = keyring + + m, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + if !m.config.EncryptionEnabled() { + t.Fatalf("Expected encryption to be enabled") + } +} + +func TestCreate_keyringAndSecretKey(t *testing.T) { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + keyring, err := NewKeyring(nil, make([]byte, 16)) + if err != nil { + t.Fatalf("err: %s", err) + } + c.Keyring = keyring + c.SecretKey = []byte{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} + + m, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + if !m.config.EncryptionEnabled() { + t.Fatalf("Expected encryption to be enabled") + } + + ringKeys := c.Keyring.GetKeys() + if !bytes.Equal(c.SecretKey, ringKeys[0]) { + t.Fatalf("Unexpected primary key %v", ringKeys[0]) + } +} + +func TestCreate_invalidLoggerSettings(t *testing.T) { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + c.Logger = log.New(ioutil.Discard, "", log.LstdFlags) + c.LogOutput = ioutil.Discard + + _, err := Create(c) + if err == nil { + t.Fatal("Memberlist should not allow both LogOutput and Logger to be set, but it did not raise an error") + } +} + +func TestCreate(t *testing.T) { + c := testConfig() + c.ProtocolVersion = ProtocolVersionMin + c.DelegateProtocolVersion = 13 + c.DelegateProtocolMin = 12 + c.DelegateProtocolMax = 24 + + m, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + yield() + + members := m.Members() + if len(members) != 1 { + t.Fatalf("bad number of members") + } + + if members[0].PMin != ProtocolVersionMin { + t.Fatalf("bad: %#v", members[0]) + } + + if members[0].PMax != ProtocolVersionMax { + t.Fatalf("bad: %#v", members[0]) + } + + if members[0].PCur != c.ProtocolVersion { + t.Fatalf("bad: %#v", members[0]) + } + + if members[0].DMin != c.DelegateProtocolMin { + t.Fatalf("bad: %#v", members[0]) + } + + if members[0].DMax != c.DelegateProtocolMax { + t.Fatalf("bad: %#v", members[0]) + } + + if members[0].DCur != c.DelegateProtocolVersion { + t.Fatalf("bad: %#v", members[0]) + } +} + +func TestMemberList_CreateShutdown(t *testing.T) { + m := GetMemberlist(t) + m.schedule() + if err := m.Shutdown(); err != nil { + t.Fatalf("failed to shutdown %v", err) + } +} + +func TestMemberList_ResolveAddr(t *testing.T) { + m := GetMemberlist(t) + if _, err := m.resolveAddr("localhost"); err != nil { + t.Fatalf("Could not resolve localhost: %s", err) + } + if _, err := m.resolveAddr("[::1]:80"); err != nil { + t.Fatalf("Could not understand ipv6 pair: %s", err) + } + if _, err := m.resolveAddr("[::1]"); err != nil { + t.Fatalf("Could not understand ipv6 non-pair") + } + if _, err := m.resolveAddr(":80"); err == nil { + t.Fatalf("Understood hostless port") + } + if _, err := m.resolveAddr("localhost:80"); err != nil { + t.Fatalf("Could not understand hostname port combo: %s", err) + } + if _, err := m.resolveAddr("localhost:80000"); err == nil { + t.Fatalf("Understood too high port") + } + if _, err := m.resolveAddr("127.0.0.1:80"); err != nil { + t.Fatalf("Could not understand hostname port combo: %s", err) + } + if _, err := m.resolveAddr("[2001:db8:a0b:12f0::1]:80"); err != nil { + t.Fatalf("Could not understand hostname port 
combo: %s", err) + } + if _, err := m.resolveAddr("127.0.0.1"); err != nil { + t.Fatalf("Could not understand IPv4 only %s", err) + } + if _, err := m.resolveAddr("[2001:db8:a0b:12f0::1]"); err != nil { + t.Fatalf("Could not understand IPv6 only %s", err) + } +} + +type dnsHandler struct { + t *testing.T +} + +func (h dnsHandler) ServeDNS(w dns.ResponseWriter, r *dns.Msg) { + if len(r.Question) != 1 { + h.t.Fatalf("bad: %#v", r.Question) + } + + name := "join.service.consul." + question := r.Question[0] + if question.Name != name || question.Qtype != dns.TypeANY { + h.t.Fatalf("bad: %#v", question) + } + + m := new(dns.Msg) + m.SetReply(r) + m.Authoritative = true + m.RecursionAvailable = false + m.Answer = append(m.Answer, &dns.A{ + Hdr: dns.RR_Header{ + Name: name, + Rrtype: dns.TypeA, + Class: dns.ClassINET}, + A: net.ParseIP("127.0.0.1"), + }) + m.Answer = append(m.Answer, &dns.AAAA{ + Hdr: dns.RR_Header{ + Name: name, + Rrtype: dns.TypeAAAA, + Class: dns.ClassINET}, + AAAA: net.ParseIP("2001:db8:a0b:12f0::1"), + }) + if err := w.WriteMsg(m); err != nil { + h.t.Fatalf("err: %v", err) + } +} + +func TestMemberList_ResolveAddr_TCP_First(t *testing.T) { + bind := "127.0.0.1:8600" + + var wg sync.WaitGroup + wg.Add(1) + server := &dns.Server{ + Addr: bind, + Handler: dnsHandler{t}, + Net: "tcp", + NotifyStartedFunc: wg.Done, + } + defer server.Shutdown() + + go func() { + if err := server.ListenAndServe(); err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("err: %v", err) + } + }() + wg.Wait() + + tmpFile, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("err: %v", err) + } + defer os.Remove(tmpFile.Name()) + + content := []byte(fmt.Sprintf("nameserver %s", bind)) + if _, err := tmpFile.Write(content); err != nil { + t.Fatalf("err: %v", err) + } + if err := tmpFile.Close(); err != nil { + t.Fatalf("err: %v", err) + } + + m := GetMemberlist(t) + m.config.DNSConfigPath = tmpFile.Name() + m.setAlive() + m.schedule() + defer m.Shutdown() + + // Try with and without the trailing dot. 
+ hosts := []string{ + "join.service.consul.", + "join.service.consul", + } + for _, host := range hosts { + ips, err := m.resolveAddr(host) + if err != nil { + t.Fatalf("err: %v", err) + } + port := uint16(m.config.BindPort) + expected := []ipPort{ + ipPort{net.ParseIP("127.0.0.1"), port}, + ipPort{net.ParseIP("2001:db8:a0b:12f0::1"), port}, + } + if !reflect.DeepEqual(ips, expected) { + t.Fatalf("bad: %#v expected: %#v", ips, expected) + } + } +} + +func TestMemberList_Members(t *testing.T) { + n1 := &Node{Name: "test"} + n2 := &Node{Name: "test2"} + n3 := &Node{Name: "test3"} + + m := &Memberlist{} + nodes := []*nodeState{ + &nodeState{Node: *n1, State: stateAlive}, + &nodeState{Node: *n2, State: stateDead}, + &nodeState{Node: *n3, State: stateSuspect}, + } + m.nodes = nodes + + members := m.Members() + if !reflect.DeepEqual(members, []*Node{n1, n3}) { + t.Fatalf("bad members") + } +} + +func TestMemberlist_Join(t *testing.T) { + m1 := GetMemberlist(t) + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + if m2.estNumNodes() != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } +} + +type CustomMergeDelegate struct { + invoked bool +} + +func (c *CustomMergeDelegate) NotifyMerge(nodes []*Node) error { + log.Printf("Cancel merge") + c.invoked = true + return fmt.Errorf("Custom merge canceled") +} + +func TestMemberlist_Join_Cancel(t *testing.T) { + m1 := GetMemberlist(t) + merge1 := &CustomMergeDelegate{} + m1.config.Merge = merge1 + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + merge2 := &CustomMergeDelegate{} + m2.config.Merge = merge2 + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 0 { + t.Fatalf("unexpected 0: %d", num) + } + if !strings.Contains(err.Error(), "Custom merge canceled") { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 1 { + t.Fatalf("should have 1 nodes! %v", m2.Members()) + } + if len(m1.Members()) != 1 { + t.Fatalf("should have 1 nodes! 
%v", m1.Members()) + } + + // Check delegate invocation + if !merge1.invoked { + t.Fatalf("should invoke delegate") + } + if !merge2.invoked { + t.Fatalf("should invoke delegate") + } +} + +type CustomAliveDelegate struct { + Ignore string + count int +} + +func (c *CustomAliveDelegate) NotifyAlive(peer *Node) error { + c.count++ + if peer.Name == c.Ignore { + return nil + } + log.Printf("Cancel alive") + return fmt.Errorf("Custom alive canceled") +} + +func TestMemberlist_Join_Cancel_Passive(t *testing.T) { + m1 := GetMemberlist(t) + alive1 := &CustomAliveDelegate{ + Ignore: m1.config.Name, + } + m1.config.Alive = alive1 + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + alive2 := &CustomAliveDelegate{ + Ignore: c.Name, + } + m2.config.Alive = alive2 + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 1 { + t.Fatalf("should have 1 nodes! %v", m2.Members()) + } + if len(m1.Members()) != 1 { + t.Fatalf("should have 1 nodes! %v", m1.Members()) + } + + // Check delegate invocation + if alive1.count == 0 { + t.Fatalf("should invoke delegate: %d", alive1.count) + } + if alive2.count == 0 { + t.Fatalf("should invoke delegate: %d", alive2.count) + } +} + +func TestMemberlist_Join_protocolVersions(t *testing.T) { + c1 := testConfig() + c2 := testConfig() + c3 := testConfig() + c3.ProtocolVersion = ProtocolVersionMax + + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m1.Shutdown() + + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m2.Shutdown() + + m3, err := Create(c3) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m3.Shutdown() + + _, err = m1.Join([]string{c2.BindAddr}) + if err != nil { + t.Fatalf("err: %s", err) + } + + yield() + + _, err = m1.Join([]string{c3.BindAddr}) + if err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestMemberlist_Leave(t *testing.T) { + m1 := GetMemberlist(t) + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + c.GossipInterval = time.Millisecond + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + if len(m1.Members()) != 2 { + t.Fatalf("should have 2 nodes! 
%v", m2.Members()) + } + + // Leave + m1.Leave(time.Second) + + // Wait for leave + time.Sleep(10 * time.Millisecond) + + // m1 should think dead + if len(m1.Members()) != 1 { + t.Fatalf("should have 1 node") + } + + if len(m2.Members()) != 1 { + t.Fatalf("should have 1 node") + } +} + +func TestMemberlist_JoinShutdown(t *testing.T) { + m1 := GetMemberlist(t) + m1.setAlive() + m1.schedule() + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + c.ProbeInterval = time.Millisecond + c.ProbeTimeout = 100 * time.Microsecond + c.SuspicionMaxTimeoutMult = 1 + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + + m1.Shutdown() + + time.Sleep(10 * time.Millisecond) + + if len(m2.Members()) != 1 { + t.Fatalf("should have 1 nodes! %v", m2.Members()) + } +} + +func TestMemberlist_delegateMeta(t *testing.T) { + c1 := testConfig() + c2 := testConfig() + c1.Delegate = &MockDelegate{meta: []byte("web")} + c2.Delegate = &MockDelegate{meta: []byte("lb")} + + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m1.Shutdown() + + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m2.Shutdown() + + _, err = m1.Join([]string{c2.BindAddr}) + if err != nil { + t.Fatalf("err: %s", err) + } + + yield() + + var roles map[string]string + + // Check the roles of members of m1 + m1m := m1.Members() + if len(m1m) != 2 { + t.Fatalf("bad: %#v", m1m) + } + + roles = make(map[string]string) + for _, m := range m1m { + roles[m.Name] = string(m.Meta) + } + + if r := roles[c1.Name]; r != "web" { + t.Fatalf("bad role for %s: %s", c1.Name, r) + } + + if r := roles[c2.Name]; r != "lb" { + t.Fatalf("bad role for %s: %s", c2.Name, r) + } + + // Check the roles of members of m2 + m2m := m2.Members() + if len(m2m) != 2 { + t.Fatalf("bad: %#v", m2m) + } + + roles = make(map[string]string) + for _, m := range m2m { + roles[m.Name] = string(m.Meta) + } + + if r := roles[c1.Name]; r != "web" { + t.Fatalf("bad role for %s: %s", c1.Name, r) + } + + if r := roles[c2.Name]; r != "lb" { + t.Fatalf("bad role for %s: %s", c2.Name, r) + } +} + +func TestMemberlist_delegateMeta_Update(t *testing.T) { + c1 := testConfig() + c2 := testConfig() + mock1 := &MockDelegate{meta: []byte("web")} + mock2 := &MockDelegate{meta: []byte("lb")} + c1.Delegate = mock1 + c2.Delegate = mock2 + + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m1.Shutdown() + + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m2.Shutdown() + + _, err = m1.Join([]string{c2.BindAddr}) + if err != nil { + t.Fatalf("err: %s", err) + } + + yield() + + // Update the meta data roles + mock1.meta = []byte("api") + mock2.meta = []byte("db") + + m1.UpdateNode(0) + m2.UpdateNode(0) + yield() + + // Check the updates have propagated + var roles map[string]string + + // Check the roles of members of m1 + m1m := m1.Members() + if len(m1m) != 2 { + t.Fatalf("bad: %#v", m1m) + } + + roles = make(map[string]string) + for _, m := range m1m { + roles[m.Name] = string(m.Meta) + } + + if r := roles[c1.Name]; r != "api" { + t.Fatalf("bad role for 
%s: %s", c1.Name, r) + } + + if r := roles[c2.Name]; r != "db" { + t.Fatalf("bad role for %s: %s", c2.Name, r) + } + + // Check the roles of members of m2 + m2m := m2.Members() + if len(m2m) != 2 { + t.Fatalf("bad: %#v", m2m) + } + + roles = make(map[string]string) + for _, m := range m2m { + roles[m.Name] = string(m.Meta) + } + + if r := roles[c1.Name]; r != "api" { + t.Fatalf("bad role for %s: %s", c1.Name, r) + } + + if r := roles[c2.Name]; r != "db" { + t.Fatalf("bad role for %s: %s", c2.Name, r) + } +} + +func TestMemberlist_UserData(t *testing.T) { + m1, d1 := GetMemberlistDelegate(t) + d1.state = []byte("something") + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second delegate with things to send + d2 := &MockDelegate{} + d2.broadcasts = [][]byte{ + []byte("test"), + []byte("foobar"), + } + d2.state = []byte("my state") + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + c.GossipInterval = time.Millisecond + c.PushPullInterval = time.Millisecond + c.Delegate = d2 + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + // Check the hosts + if m2.NumMembers() != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + + // Wait for a little while + time.Sleep(3 * time.Millisecond) + + // Ensure we got the messages + if len(d1.msgs) != 2 { + t.Fatalf("should have 2 messages!") + } + if !reflect.DeepEqual(d1.msgs[0], []byte("test")) { + t.Fatalf("bad msg %v", d1.msgs[0]) + } + if !reflect.DeepEqual(d1.msgs[1], []byte("foobar")) { + t.Fatalf("bad msg %v", d1.msgs[1]) + } + + // Check the push/pull state + if !reflect.DeepEqual(d1.remoteState, []byte("my state")) { + t.Fatalf("bad state %s", d1.remoteState) + } + if !reflect.DeepEqual(d2.remoteState, []byte("something")) { + t.Fatalf("bad state %s", d2.remoteState) + } +} + +func TestMemberlist_SendTo(t *testing.T) { + m1, d1 := GetMemberlistDelegate(t) + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second delegate with things to send + d2 := &MockDelegate{} + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + c.GossipInterval = time.Millisecond + c.PushPullInterval = time.Millisecond + c.Delegate = d2 + + m2, err := Create(c) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if m2.NumMembers() != 2 { + t.Fatalf("should have 2 nodes! 
%v", m2.Members()) + } + + // Try to do a direct send + m2Addr := &net.UDPAddr{IP: addr1, + Port: c.BindPort} + if err := m1.SendTo(m2Addr, []byte("ping")); err != nil { + t.Fatalf("err: %v", err) + } + + m1Addr := &net.UDPAddr{IP: net.ParseIP(m1.config.BindAddr), + Port: m1.config.BindPort} + if err := m2.SendTo(m1Addr, []byte("pong")); err != nil { + t.Fatalf("err: %v", err) + } + + // Wait for a little while + time.Sleep(3 * time.Millisecond) + + // Ensure we got the messages + if len(d1.msgs) != 1 { + t.Fatalf("should have 1 messages!") + } + if !reflect.DeepEqual(d1.msgs[0], []byte("pong")) { + t.Fatalf("bad msg %v", d1.msgs[0]) + } + + if len(d2.msgs) != 1 { + t.Fatalf("should have 1 messages!") + } + if !reflect.DeepEqual(d2.msgs[0], []byte("ping")) { + t.Fatalf("bad msg %v", d2.msgs[0]) + } +} + +func TestMemberlistProtocolVersion(t *testing.T) { + c := DefaultLANConfig() + c.BindAddr = getBindAddr().String() + c.ProtocolVersion = ProtocolVersionMax + m, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + result := m.ProtocolVersion() + if result != ProtocolVersionMax { + t.Fatalf("bad: %d", result) + } +} + +func TestMemberlist_Join_DeadNode(t *testing.T) { + m1 := GetMemberlist(t) + m1.config.TCPTimeout = 50 * time.Millisecond + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second "node", which is just a TCP listener that + // does not ever respond. This is to test our deadliens + addr1 := getBindAddr() + list, err := net.Listen("tcp", fmt.Sprintf("%s:%d", addr1.String(), m1.config.BindPort)) + if err != nil { + t.Fatalf("err: %v", err) + } + defer list.Close() + + // Ensure we don't hang forever + timer := time.AfterFunc(100*time.Millisecond, func() { + panic("should have timed out by now") + }) + defer timer.Stop() + + num, err := m1.Join([]string{addr1.String()}) + if num != 0 { + t.Fatalf("unexpected 0: %d", num) + } + if err == nil { + t.Fatal("expect err") + } +} + +// Tests that nodes running different versions of the protocol can successfully +// discover each other and add themselves to their respective member lists. +func TestMemberlist_Join_Prototocol_Compatibility(t *testing.T) { + testProtocolVersionPair := func(t *testing.T, pv1 uint8, pv2 uint8) { + c1 := testConfig() + c1.ProtocolVersion = pv1 + m1, err := NewMemberlistOnOpenPort(c1) + if err != nil { + t.Fatalf("failed to start: %v", err) + } + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + c2 := DefaultLANConfig() + addr1 := getBindAddr() + c2.Name = addr1.String() + c2.BindAddr = addr1.String() + c2.BindPort = m1.config.BindPort + c2.ProtocolVersion = pv2 + + m2, err := Create(c2) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + + // Check the hosts + if len(m1.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m1.Members()) + } + } + + testProtocolVersionPair(t, 2, 1) + testProtocolVersionPair(t, 2, 3) + testProtocolVersionPair(t, 3, 2) + testProtocolVersionPair(t, 3, 1) +} + +func TestMemberlist_Join_IPv6(t *testing.T) { + // Since this binds to all interfaces we need to exclude other tests + // from grabbing an interface. 
+ bindLock.Lock() + defer bindLock.Unlock() + + c1 := DefaultLANConfig() + c1.Name = "A" + c1.BindAddr = "[::1]" + var m1 *Memberlist + var err error + for i := 0; i < 100; i++ { + c1.BindPort = 23456 + i + m1, err = Create(c1) + if err == nil { + break + } + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m1.Shutdown() + + // Create a second node + c2 := DefaultLANConfig() + c2.Name = "B" + c2.BindAddr = "[::1]" + var m2 *Memberlist + for i := 0; i < 100; i++ { + c2.BindPort = c1.BindPort + 1 + i + m2, err = Create(c2) + if err == nil { + break + } + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + num, err := m2.Join([]string{fmt.Sprintf("%s:%d", m1.config.BindAddr, 23456)}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + + if len(m1.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } +} + +func TestAdvertiseAddr(t *testing.T) { + c := testConfig() + c.AdvertiseAddr = "127.0.1.100" + c.AdvertisePort = 23456 + + m, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + yield() + + members := m.Members() + if len(members) != 1 { + t.Fatalf("bad number of members") + } + + if bytes.Compare(members[0].Addr, []byte{127, 0, 1, 100}) != 0 { + t.Fatalf("bad: %#v", members[0]) + } + + if members[0].Port != 23456 { + t.Fatalf("bad: %#v", members[0]) + } +} + +type MockConflict struct { + existing *Node + other *Node +} + +func (m *MockConflict) NotifyConflict(existing, other *Node) { + m.existing = existing + m.other = other +} + +func TestMemberlist_conflictDelegate(t *testing.T) { + c1 := testConfig() + c2 := testConfig() + mock := &MockConflict{} + c1.Conflict = mock + + // Ensure name conflict + c2.Name = c1.Name + + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m1.Shutdown() + + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m2.Shutdown() + + _, err = m1.Join([]string{c2.BindAddr}) + if err != nil { + t.Fatalf("err: %s", err) + } + + yield() + + // Ensure we were notified + if mock.existing == nil || mock.other == nil { + t.Fatalf("should get notified") + } + if mock.existing.Name != mock.other.Name { + t.Fatalf("bad: %v %v", mock.existing, mock.other) + } +} + +type MockPing struct { + other *Node + rtt time.Duration + payload []byte +} + +func (m *MockPing) NotifyPingComplete(other *Node, rtt time.Duration, payload []byte) { + m.other = other + m.rtt = rtt + m.payload = payload +} + +const DEFAULT_PAYLOAD = "whatever" + +func (m *MockPing) AckPayload() []byte { + return []byte(DEFAULT_PAYLOAD) +} + +func TestMemberlist_PingDelegate(t *testing.T) { + m1 := GetMemberlist(t) + m1.config.Ping = &MockPing{} + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second node + c := DefaultLANConfig() + addr1 := getBindAddr() + c.Name = addr1.String() + c.BindAddr = addr1.String() + c.BindPort = m1.config.BindPort + c.ProbeInterval = time.Millisecond + mock := &MockPing{} + c.Ping = mock + + m2, err := Create(c) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m2.Shutdown() + + _, err = m2.Join([]string{m1.config.BindAddr}) + if err != nil { + t.Fatalf("err: %s", err) + } + + yield() + + // Ensure we were notified + if mock.other == nil { + t.Fatalf("should get notified") + } + + if !reflect.DeepEqual(mock.other, 
m1.LocalNode()) { + t.Fatalf("not notified about the correct node; expected: %+v; actual: %+v", + m2.LocalNode(), mock.other) + } + + if mock.rtt <= 0 { + t.Fatalf("rtt should be greater than 0") + } + + if bytes.Compare(mock.payload, []byte(DEFAULT_PAYLOAD)) != 0 { + t.Fatalf("incorrect payload. expected: %v; actual: %v", []byte(DEFAULT_PAYLOAD), mock.payload) + } +} + +func TestMemberlist_EncryptedGossipTransition(t *testing.T) { + m1 := GetMemberlist(t) + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + // Create a second node with the first stage of gossip transition settings + conf2 := DefaultLANConfig() + addr2 := getBindAddr() + conf2.Name = addr2.String() + conf2.BindAddr = addr2.String() + conf2.BindPort = m1.config.BindPort + conf2.GossipInterval = time.Millisecond + conf2.SecretKey = []byte("Hi16ZXu2lNCRVwtr20khAg==") + conf2.GossipVerifyIncoming = false + conf2.GossipVerifyOutgoing = false + + m2, err := Create(conf2) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m2.Shutdown() + + // Join the second node. m1 has no encryption while m2 has encryption configured and + // can receive encrypted gossip, but will not encrypt outgoing gossip. + num, err := m2.Join([]string{m1.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m2.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + if m2.estNumNodes() != 2 { + t.Fatalf("should have 2 nodes! %v", m2.Members()) + } + + // Leave with the first node + m1.Leave(time.Second) + + // Wait for leave + time.Sleep(10 * time.Millisecond) + + // Create a third node that has the second stage of gossip transition settings + conf3 := DefaultLANConfig() + addr3 := getBindAddr() + conf3.Name = addr3.String() + conf3.BindAddr = addr3.String() + conf3.BindPort = m1.config.BindPort + conf3.GossipInterval = time.Millisecond + conf3.SecretKey = conf2.SecretKey + conf3.GossipVerifyIncoming = false + + m3, err := Create(conf3) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m3.Shutdown() + + // Join the third node to the second node. At this step, both nodes have encryption + // configured but only m3 is sending encrypted gossip. + num, err = m3.Join([]string{m2.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m3.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m3.Members()) + + } + if m3.estNumNodes() != 2 { + t.Fatalf("should have 2 nodes! %v", m3.Members()) + } + + // Leave with the second node + m2.Leave(time.Second) + + // Wait for leave + time.Sleep(10 * time.Millisecond) + + // Create a fourth node that has the second stage of gossip transition settings + conf4 := DefaultLANConfig() + addr4 := getBindAddr() + conf4.Name = addr4.String() + conf4.BindAddr = addr4.String() + conf4.BindPort = m3.config.BindPort + conf4.GossipInterval = time.Millisecond + conf4.SecretKey = conf2.SecretKey + + m4, err := Create(conf4) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + defer m4.Shutdown() + + // Join the fourth node to the third node. At this step, both m3 and m4 are speaking + // encrypted gossip and m3 is still accepting insecure gossip. 
+ num, err = m4.Join([]string{m3.config.BindAddr}) + if num != 1 { + t.Fatalf("unexpected 1: %d", num) + } + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + // Check the hosts + if len(m4.Members()) != 2 { + t.Fatalf("should have 2 nodes! %v", m4.Members()) + } + if m4.estNumNodes() != 2 { + t.Fatalf("should have 2 nodes! %v", m4.Members()) + } +} + +// Consul bug, rapid restart (before failure detection), +// with updated metadata. Should be at incarnation 1 for +// both. +// +// This test is commented out because it requires that either we +// can rebind the socket (SO_REUSEPORT), which Go does not allow, +// OR we must disable the address conflict checking in memberlist. +// I just comment out that code to test this case. +// +//func TestMemberlist_Restart_delegateMeta_Update(t *testing.T) { +// c1 := testConfig() +// c2 := testConfig() +// mock1 := &MockDelegate{meta: []byte("web")} +// mock2 := &MockDelegate{meta: []byte("lb")} +// c1.Delegate = mock1 +// c2.Delegate = mock2 + +// m1, err := Create(c1) +// if err != nil { +// t.Fatalf("err: %s", err) +// } +// defer m1.Shutdown() + +// m2, err := Create(c2) +// if err != nil { +// t.Fatalf("err: %s", err) +// } +// defer m2.Shutdown() + +// _, err = m1.Join([]string{c2.BindAddr}) +// if err != nil { +// t.Fatalf("err: %s", err) +// } + +// yield() + +// // Recreate m1 with updated meta +// m1.Shutdown() +// c3 := testConfig() +// c3.Name = c1.Name +// c3.Delegate = mock1 +// c3.GossipInterval = time.Millisecond +// mock1.meta = []byte("api") + +// m1, err = Create(c3) +// if err != nil { +// t.Fatalf("err: %s", err) +// } +// defer m1.Shutdown() + +// _, err = m1.Join([]string{c2.BindAddr}) +// if err != nil { +// t.Fatalf("err: %s", err) +// } + +// yield() +// yield() + +// // Check the updates have propagated +// var roles map[string]string + +// // Check the roles of members of m1 +// m1m := m1.Members() +// if len(m1m) != 2 { +// t.Fatalf("bad: %#v", m1m) +// } + +// roles = make(map[string]string) +// for _, m := range m1m { +// roles[m.Name] = string(m.Meta) +// } + +// if r := roles[c1.Name]; r != "api" { +// t.Fatalf("bad role for %s: %s", c1.Name, r) +// } + +// if r := roles[c2.Name]; r != "lb" { +// t.Fatalf("bad role for %s: %s", c2.Name, r) +// } + +// // Check the roles of members of m2 +// m2m := m2.Members() +// if len(m2m) != 2 { +// t.Fatalf("bad: %#v", m2m) +// } + +// roles = make(map[string]string) +// for _, m := range m2m { +// roles[m.Name] = string(m.Meta) +// } + +// if r := roles[c1.Name]; r != "api" { +// t.Fatalf("bad role for %s: %s", c1.Name, r) +// } + +// if r := roles[c2.Name]; r != "lb" { +// t.Fatalf("bad role for %s: %s", c2.Name, r) +// } +//} diff --git a/vendor/github.com/hashicorp/memberlist/net_test.go b/vendor/github.com/hashicorp/memberlist/net_test.go new file mode 100644 index 0000000000..8605358556 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/net_test.go @@ -0,0 +1,814 @@ +package memberlist + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "log" + "net" + "reflect" + "strings" + "testing" + "time" + + "github.com/hashicorp/go-msgpack/codec" +) + +// For regression coverage we left this test very low-level and network-ey, even after +// we abstracted the transport. We added some basic network-free transport tests +// in transport_test.go to prove that we didn't hard-code any network stuff +// outside of NetTransport.
+ +func TestHandleCompoundPing(t *testing.T) { + m := GetMemberlist(t) + m.config.EnableCompression = false + defer m.Shutdown() + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + if udp == nil { + t.Fatalf("no udp listener") + } + + // Encode a ping + ping := ping{SeqNo: 42} + buf, err := encode(pingMsg, ping) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Make a compound message + compound := makeCompoundMessage([][]byte{buf.Bytes(), buf.Bytes(), buf.Bytes()}) + + // Send compound version + addr := &net.UDPAddr{IP: net.ParseIP(m.config.BindAddr), Port: m.config.BindPort} + udp.WriteTo(compound.Bytes(), addr) + + // Wait for responses + doneCh := make(chan struct{}, 1) + go func() { + select { + case <-doneCh: + case <-time.After(2 * time.Second): + panic("timeout") + } + }() + + for i := 0; i < 3; i++ { + in := make([]byte, 1500) + n, _, err := udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + msgType := messageType(in[0]) + if msgType != ackRespMsg { + t.Fatalf("bad response %v", in) + } + + var ack ackResp + if err := decode(in[1:], &ack); err != nil { + t.Fatalf("unexpected err %s", err) + } + + if ack.SeqNo != 42 { + t.Fatalf("bad sequence no") + } + } + + doneCh <- struct{}{} +} + +func TestHandlePing(t *testing.T) { + m := GetMemberlist(t) + m.config.EnableCompression = false + defer m.Shutdown() + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + if udp == nil { + t.Fatalf("no udp listener") + } + + // Encode a ping + ping := ping{SeqNo: 42} + buf, err := encode(pingMsg, ping) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Send + addr := &net.UDPAddr{IP: net.ParseIP(m.config.BindAddr), Port: m.config.BindPort} + udp.WriteTo(buf.Bytes(), addr) + + // Wait for response + doneCh := make(chan struct{}, 1) + go func() { + select { + case <-doneCh: + case <-time.After(2 * time.Second): + panic("timeout") + } + }() + + in := make([]byte, 1500) + n, _, err := udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + msgType := messageType(in[0]) + if msgType != ackRespMsg { + t.Fatalf("bad response %v", in) + } + + var ack ackResp + if err := decode(in[1:], &ack); err != nil { + t.Fatalf("unexpected err %s", err) + } + + if ack.SeqNo != 42 { + t.Fatalf("bad sequence no") + } + + doneCh <- struct{}{} +} + +func TestHandlePing_WrongNode(t *testing.T) { + m := GetMemberlist(t) + m.config.EnableCompression = false + defer m.Shutdown() + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + if udp == nil { + t.Fatalf("no udp listener") + } + + // Encode a ping, wrong node! 
+ ping := ping{SeqNo: 42, Node: m.config.Name + "-bad"} + buf, err := encode(pingMsg, ping) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Send + addr := &net.UDPAddr{IP: net.ParseIP(m.config.BindAddr), Port: m.config.BindPort} + udp.WriteTo(buf.Bytes(), addr) + + // Wait for response + udp.SetDeadline(time.Now().Add(50 * time.Millisecond)) + in := make([]byte, 1500) + _, _, err = udp.ReadFrom(in) + + // Should get an i/o timeout + if err == nil { + t.Fatalf("expected err %s", err) + } +} + +func TestHandleIndirectPing(t *testing.T) { + m := GetMemberlist(t) + m.config.EnableCompression = false + defer m.Shutdown() + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + if udp == nil { + t.Fatalf("no udp listener") + } + + // Encode an indirect ping + ind := indirectPingReq{ + SeqNo: 100, + Target: net.ParseIP(m.config.BindAddr), + Port: uint16(m.config.BindPort), + } + buf, err := encode(indirectPingMsg, &ind) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Send + addr := &net.UDPAddr{IP: net.ParseIP(m.config.BindAddr), Port: m.config.BindPort} + udp.WriteTo(buf.Bytes(), addr) + + // Wait for response + doneCh := make(chan struct{}, 1) + go func() { + select { + case <-doneCh: + case <-time.After(2 * time.Second): + panic("timeout") + } + }() + + in := make([]byte, 1500) + n, _, err := udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + msgType := messageType(in[0]) + if msgType != ackRespMsg { + t.Fatalf("bad response %v", in) + } + + var ack ackResp + if err := decode(in[1:], &ack); err != nil { + t.Fatalf("unexpected err %s", err) + } + + if ack.SeqNo != 100 { + t.Fatalf("bad sequence no") + } + + doneCh <- struct{}{} +} + +func TestTCPPing(t *testing.T) { + var tcp *net.TCPListener + var tcpAddr *net.TCPAddr + for port := 60000; port < 61000; port++ { + tcpAddr = &net.TCPAddr{IP: net.ParseIP("127.0.0.1"), Port: port} + tcpLn, err := net.ListenTCP("tcp", tcpAddr) + if err == nil { + tcp = tcpLn + break + } + } + if tcp == nil { + t.Fatalf("no tcp listener") + } + + // Note that tcp gets closed in the last test, so we avoid a deferred + // Close() call here. + + m := GetMemberlist(t) + defer m.Shutdown() + pingTimeout := m.config.ProbeInterval + pingTimeMax := m.config.ProbeInterval + 10*time.Millisecond + + // Do a normal round trip. 
+ pingOut := ping{SeqNo: 23, Node: "mongo"} + go func() { + tcp.SetDeadline(time.Now().Add(pingTimeMax)) + conn, err := tcp.AcceptTCP() + if err != nil { + t.Fatalf("failed to connect: %s", err) + } + defer conn.Close() + + msgType, _, dec, err := m.readStream(conn) + if err != nil { + t.Fatalf("failed to read ping: %s", err) + } + + if msgType != pingMsg { + t.Fatalf("expecting ping, got message type (%d)", msgType) + } + + var pingIn ping + if err := dec.Decode(&pingIn); err != nil { + t.Fatalf("failed to decode ping: %s", err) + } + + if pingIn.SeqNo != pingOut.SeqNo { + t.Fatalf("sequence number isn't correct (%d) vs (%d)", pingIn.SeqNo, pingOut.SeqNo) + } + + if pingIn.Node != pingOut.Node { + t.Fatalf("node name isn't correct (%s) vs (%s)", pingIn.Node, pingOut.Node) + } + + ack := ackResp{pingIn.SeqNo, nil} + out, err := encode(ackRespMsg, &ack) + if err != nil { + t.Fatalf("failed to encode ack: %s", err) + } + + err = m.rawSendMsgStream(conn, out.Bytes()) + if err != nil { + t.Fatalf("failed to send ack: %s", err) + } + }() + deadline := time.Now().Add(pingTimeout) + didContact, err := m.sendPingAndWaitForAck(tcpAddr.String(), pingOut, deadline) + if err != nil { + t.Fatalf("error trying to ping: %s", err) + } + if !didContact { + t.Fatalf("expected successful ping") + } + + // Make sure a mis-matched sequence number is caught. + go func() { + tcp.SetDeadline(time.Now().Add(pingTimeMax)) + conn, err := tcp.AcceptTCP() + if err != nil { + t.Fatalf("failed to connect: %s", err) + } + defer conn.Close() + + _, _, dec, err := m.readStream(conn) + if err != nil { + t.Fatalf("failed to read ping: %s", err) + } + + var pingIn ping + if err := dec.Decode(&pingIn); err != nil { + t.Fatalf("failed to decode ping: %s", err) + } + + ack := ackResp{pingIn.SeqNo + 1, nil} + out, err := encode(ackRespMsg, &ack) + if err != nil { + t.Fatalf("failed to encode ack: %s", err) + } + + err = m.rawSendMsgStream(conn, out.Bytes()) + if err != nil { + t.Fatalf("failed to send ack: %s", err) + } + }() + deadline = time.Now().Add(pingTimeout) + didContact, err = m.sendPingAndWaitForAck(tcpAddr.String(), pingOut, deadline) + if err == nil || !strings.Contains(err.Error(), "Sequence number") { + t.Fatalf("expected an error from mis-matched sequence number") + } + if didContact { + t.Fatalf("expected failed ping") + } + + // Make sure an unexpected message type is handled gracefully. + go func() { + tcp.SetDeadline(time.Now().Add(pingTimeMax)) + conn, err := tcp.AcceptTCP() + if err != nil { + t.Fatalf("failed to connect: %s", err) + } + defer conn.Close() + + _, _, _, err = m.readStream(conn) + if err != nil { + t.Fatalf("failed to read ping: %s", err) + } + + bogus := indirectPingReq{} + out, err := encode(indirectPingMsg, &bogus) + if err != nil { + t.Fatalf("failed to encode bogus msg: %s", err) + } + + err = m.rawSendMsgStream(conn, out.Bytes()) + if err != nil { + t.Fatalf("failed to send bogus msg: %s", err) + } + }() + deadline = time.Now().Add(pingTimeout) + didContact, err = m.sendPingAndWaitForAck(tcpAddr.String(), pingOut, deadline) + if err == nil || !strings.Contains(err.Error(), "Unexpected msgType") { + t.Fatalf("expected an error from bogus message") + } + if didContact { + t.Fatalf("expected failed ping") + } + + // Make sure failed I/O respects the deadline. In this case we try the + // common case of the receiving node being totally down. 
+ tcp.Close() + deadline = time.Now().Add(pingTimeout) + startPing := time.Now() + didContact, err = m.sendPingAndWaitForAck(tcpAddr.String(), pingOut, deadline) + pingTime := time.Now().Sub(startPing) + if err != nil { + t.Fatalf("expected no error during ping on closed socket, got: %s", err) + } + if didContact { + t.Fatalf("expected failed ping") + } + if pingTime > pingTimeMax { + t.Fatalf("took too long to fail ping, %9.6f", pingTime.Seconds()) + } +} + +func TestTCPPushPull(t *testing.T) { + m := GetMemberlist(t) + defer m.Shutdown() + m.nodes = append(m.nodes, &nodeState{ + Node: Node{ + Name: "Test 0", + Addr: net.ParseIP(m.config.BindAddr), + Port: uint16(m.config.BindPort), + }, + Incarnation: 0, + State: stateSuspect, + StateChange: time.Now().Add(-1 * time.Second), + }) + + addr := fmt.Sprintf("%s:%d", m.config.BindAddr, m.config.BindPort) + conn, err := net.Dial("tcp", addr) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + defer conn.Close() + + localNodes := make([]pushNodeState, 3) + localNodes[0].Name = "Test 0" + localNodes[0].Addr = net.ParseIP(m.config.BindAddr) + localNodes[0].Port = uint16(m.config.BindPort) + localNodes[0].Incarnation = 1 + localNodes[0].State = stateAlive + localNodes[1].Name = "Test 1" + localNodes[1].Addr = net.ParseIP(m.config.BindAddr) + localNodes[1].Port = uint16(m.config.BindPort) + localNodes[1].Incarnation = 1 + localNodes[1].State = stateAlive + localNodes[2].Name = "Test 2" + localNodes[2].Addr = net.ParseIP(m.config.BindAddr) + localNodes[2].Port = uint16(m.config.BindPort) + localNodes[2].Incarnation = 1 + localNodes[2].State = stateAlive + + // Send our node state + header := pushPullHeader{Nodes: 3} + hd := codec.MsgpackHandle{} + enc := codec.NewEncoder(conn, &hd) + + // Send the push/pull indicator + conn.Write([]byte{byte(pushPullMsg)}) + + if err := enc.Encode(&header); err != nil { + t.Fatalf("unexpected err %s", err) + } + for i := 0; i < header.Nodes; i++ { + if err := enc.Encode(&localNodes[i]); err != nil { + t.Fatalf("unexpected err %s", err) + } + } + + // Read the message type + var msgType messageType + if err := binary.Read(conn, binary.BigEndian, &msgType); err != nil { + t.Fatalf("unexpected err %s", err) + } + + var bufConn io.Reader = conn + msghd := codec.MsgpackHandle{} + dec := codec.NewDecoder(bufConn, &msghd) + + // Check if we have a compressed message + if msgType == compressMsg { + var c compress + if err := dec.Decode(&c); err != nil { + t.Fatalf("unexpected err %s", err) + } + decomp, err := decompressBuffer(&c) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Reset the message type + msgType = messageType(decomp[0]) + + // Create a new bufConn + bufConn = bytes.NewReader(decomp[1:]) + + // Create a new decoder + dec = codec.NewDecoder(bufConn, &hd) + } + + // Quit if not push/pull + if msgType != pushPullMsg { + t.Fatalf("bad message type") + } + + if err := dec.Decode(&header); err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Allocate space for the transfer + remoteNodes := make([]pushNodeState, header.Nodes) + + // Try to decode all the states + for i := 0; i < header.Nodes; i++ { + if err := dec.Decode(&remoteNodes[i]); err != nil { + t.Fatalf("unexpected err %s", err) + } + } + + if len(remoteNodes) != 1 { + t.Fatalf("bad response") + } + + n := &remoteNodes[0] + if n.Name != "Test 0" { + t.Fatalf("bad name") + } + if bytes.Compare(n.Addr, net.ParseIP(m.config.BindAddr)) != 0 { + t.Fatal("bad addr") + } + if n.Incarnation != 0 { + t.Fatal("bad incarnation") + } + 
if n.State != stateSuspect { + t.Fatal("bad state") + } +} + +func TestSendMsg_Piggyback(t *testing.T) { + m := GetMemberlist(t) + defer m.Shutdown() + + // Add a message to be broadcast + a := alive{ + Incarnation: 10, + Node: "rand", + Addr: []byte{127, 0, 0, 255}, + Meta: nil, + } + m.encodeAndBroadcast("rand", aliveMsg, &a) + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + // Encode a ping + ping := ping{SeqNo: 42} + buf, err := encode(pingMsg, ping) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + // Send + addr := &net.UDPAddr{IP: net.ParseIP(m.config.BindAddr), Port: m.config.BindPort} + udp.WriteTo(buf.Bytes(), addr) + + // Wait for response + doneCh := make(chan struct{}, 1) + go func() { + select { + case <-doneCh: + case <-time.After(2 * time.Second): + panic("timeout") + } + }() + + in := make([]byte, 1500) + n, _, err := udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + msgType := messageType(in[0]) + if msgType != compoundMsg { + t.Fatalf("bad response %v", in) + } + + // get the parts + trunc, parts, err := decodeCompoundMessage(in[1:]) + if trunc != 0 { + t.Fatalf("unexpected truncation") + } + if len(parts) != 2 { + t.Fatalf("unexpected parts %v", parts) + } + if err != nil { + t.Fatalf("unexpected err %s", err) + } + + var ack ackResp + if err := decode(parts[0][1:], &ack); err != nil { + t.Fatalf("unexpected err %s", err) + } + + if ack.SeqNo != 42 { + t.Fatalf("bad sequence no") + } + + var aliveout alive + if err := decode(parts[1][1:], &aliveout); err != nil { + t.Fatalf("unexpected err %s", err) + } + + if aliveout.Node != "rand" || aliveout.Incarnation != 10 { + t.Fatalf("bad mesg") + } + + doneCh <- struct{}{} +} + +func TestEncryptDecryptState(t *testing.T) { + state := []byte("this is our internal state...") + config := &Config{ + SecretKey: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + ProtocolVersion: ProtocolVersionMax, + } + + m, err := Create(config) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m.Shutdown() + + crypt, err := m.encryptLocalState(state) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Create reader, seek past the type byte + buf := bytes.NewReader(crypt) + buf.Seek(1, 0) + + plain, err := m.decryptRemoteState(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + + if !reflect.DeepEqual(state, plain) { + t.Fatalf("Decrypt failed: %v", plain) + } +} + +func TestRawSendUdp_CRC(t *testing.T) { + m := GetMemberlist(t) + m.config.EnableCompression = false + defer m.Shutdown() + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + if udp == nil { + t.Fatalf("no udp listener") + } + + // Pass a nil node with no nodes registered, should result in no checksum + payload := []byte{3, 3, 3, 3} + m.rawSendMsgPacket(udp.LocalAddr().String(), nil, payload) + + in := make([]byte, 1500) + n, _, err := udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + if len(in) != 4 { + t.Fatalf("bad: %v", in) + } + + // Pass a non-nil node with PMax >= 5, should result in a checksum + m.rawSendMsgPacket(udp.LocalAddr().String(), &Node{PMax: 5}, payload) + + in = make([]byte, 1500) + 
n, _, err = udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + if len(in) != 9 { + t.Fatalf("bad: %v", in) + } + + // Register a node with PMax >= 5 to be looked up, should result in a checksum + m.nodeMap["127.0.0.1"] = &nodeState{ + Node: Node{PMax: 5}, + } + m.rawSendMsgPacket(udp.LocalAddr().String(), nil, payload) + + in = make([]byte, 1500) + n, _, err = udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + if len(in) != 9 { + t.Fatalf("bad: %v", in) + } +} + +func TestIngestPacket_CRC(t *testing.T) { + m := GetMemberlist(t) + m.config.EnableCompression = false + defer m.Shutdown() + + var udp *net.UDPConn + for port := 60000; port < 61000; port++ { + udpAddr := fmt.Sprintf("127.0.0.1:%d", port) + udpLn, err := net.ListenPacket("udp", udpAddr) + if err == nil { + udp = udpLn.(*net.UDPConn) + break + } + } + + if udp == nil { + t.Fatalf("no udp listener") + } + + // Get a message with a checksum + payload := []byte{3, 3, 3, 3} + m.rawSendMsgPacket(udp.LocalAddr().String(), &Node{PMax: 5}, payload) + + in := make([]byte, 1500) + n, _, err := udp.ReadFrom(in) + if err != nil { + t.Fatalf("unexpected err %s", err) + } + in = in[0:n] + + if len(in) != 9 { + t.Fatalf("bad: %v", in) + } + + // Corrupt the checksum + in[1] <<= 1 + + logs := &bytes.Buffer{} + logger := log.New(logs, "", 0) + m.logger = logger + m.ingestPacket(in, udp.LocalAddr(), time.Now()) + + if !strings.Contains(logs.String(), "invalid checksum") { + t.Fatalf("bad: %s", logs.String()) + } +} + +func TestGossip_MismatchedKeys(t *testing.T) { + c1 := testConfig() + c2 := testConfig() + + // Create two agents with different gossip keys + c1.SecretKey = []byte("4W6DGn2VQVqDEceOdmuRTQ==") + c2.SecretKey = []byte("XhX/w702/JKKK7/7OtM9Ww==") + + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m1.Shutdown() + + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %s", err) + } + defer m2.Shutdown() + + // Make sure we get this error on the joining side + _, err = m2.Join([]string{c1.BindAddr}) + if err == nil || !strings.Contains(err.Error(), "No installed keys could decrypt the message") { + t.Fatalf("bad: %s", err) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/queue_test.go b/vendor/github.com/hashicorp/memberlist/queue_test.go new file mode 100644 index 0000000000..765a3b53d7 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/queue_test.go @@ -0,0 +1,172 @@ +package memberlist + +import ( + "testing" +) + +func TestTransmitLimited_Queue(t *testing.T) { + q := &TransmitLimitedQueue{RetransmitMult: 1, NumNodes: func() int { return 1 }} + q.QueueBroadcast(&memberlistBroadcast{"test", nil, nil}) + q.QueueBroadcast(&memberlistBroadcast{"foo", nil, nil}) + q.QueueBroadcast(&memberlistBroadcast{"bar", nil, nil}) + + if len(q.bcQueue) != 3 { + t.Fatalf("bad len") + } + if q.bcQueue[0].b.(*memberlistBroadcast).node != "test" { + t.Fatalf("missing test") + } + if q.bcQueue[1].b.(*memberlistBroadcast).node != "foo" { + t.Fatalf("missing foo") + } + if q.bcQueue[2].b.(*memberlistBroadcast).node != "bar" { + t.Fatalf("missing bar") + } + + // Should invalidate previous message + q.QueueBroadcast(&memberlistBroadcast{"test", nil, nil}) + + if len(q.bcQueue) != 3 { + t.Fatalf("bad len") + } + if q.bcQueue[0].b.(*memberlistBroadcast).node != "foo" { + t.Fatalf("missing foo") + } + if q.bcQueue[1].b.(*memberlistBroadcast).node != "bar" { + t.Fatalf("missing bar") + } + if 
q.bcQueue[2].b.(*memberlistBroadcast).node != "test" { + t.Fatalf("missing test") + } +} + +func TestTransmitLimited_GetBroadcasts(t *testing.T) { + q := &TransmitLimitedQueue{RetransmitMult: 3, NumNodes: func() int { return 10 }} + + // 18 bytes per message + q.QueueBroadcast(&memberlistBroadcast{"test", []byte("1. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"foo", []byte("2. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"bar", []byte("3. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"baz", []byte("4. this is a test."), nil}) + + // 2 byte overhead per message, should get all 4 messages + all := q.GetBroadcasts(2, 80) + if len(all) != 4 { + t.Fatalf("missing messages: %v", all) + } + + // 3 byte overhead, should only get 3 messages back + partial := q.GetBroadcasts(3, 80) + if len(partial) != 3 { + t.Fatalf("missing messages: %v", partial) + } +} + +func TestTransmitLimited_GetBroadcasts_Limit(t *testing.T) { + q := &TransmitLimitedQueue{RetransmitMult: 1, NumNodes: func() int { return 10 }} + + // 18 bytes per message + q.QueueBroadcast(&memberlistBroadcast{"test", []byte("1. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"foo", []byte("2. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"bar", []byte("3. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"baz", []byte("4. this is a test."), nil}) + + // 3 byte overhead, should only get 3 messages back + partial1 := q.GetBroadcasts(3, 80) + if len(partial1) != 3 { + t.Fatalf("missing messages: %v", partial1) + } + + partial2 := q.GetBroadcasts(3, 80) + if len(partial2) != 3 { + t.Fatalf("missing messages: %v", partial2) + } + + // Only two not expired + partial3 := q.GetBroadcasts(3, 80) + if len(partial3) != 2 { + t.Fatalf("missing messages: %v", partial3) + } + + // Should get nothing + partial5 := q.GetBroadcasts(3, 80) + if len(partial5) != 0 { + t.Fatalf("missing messages: %v", partial5) + } +} + +func TestTransmitLimited_Prune(t *testing.T) { + q := &TransmitLimitedQueue{RetransmitMult: 1, NumNodes: func() int { return 10 }} + + ch1 := make(chan struct{}, 1) + ch2 := make(chan struct{}, 1) + + // 18 bytes per message + q.QueueBroadcast(&memberlistBroadcast{"test", []byte("1. this is a test."), ch1}) + q.QueueBroadcast(&memberlistBroadcast{"foo", []byte("2. this is a test."), ch2}) + q.QueueBroadcast(&memberlistBroadcast{"bar", []byte("3. this is a test."), nil}) + q.QueueBroadcast(&memberlistBroadcast{"baz", []byte("4. 
this is a test."), nil}) + + // Keep only 2 + q.Prune(2) + + if q.NumQueued() != 2 { + t.Fatalf("bad len") + } + + // Should notify the first two + select { + case <-ch1: + default: + t.Fatalf("expected invalidation") + } + select { + case <-ch2: + default: + t.Fatalf("expected invalidation") + } + + if q.bcQueue[0].b.(*memberlistBroadcast).node != "bar" { + t.Fatalf("missing bar") + } + if q.bcQueue[1].b.(*memberlistBroadcast).node != "baz" { + t.Fatalf("missing baz") + } +} + +func TestLimitedBroadcastSort(t *testing.T) { + bc := limitedBroadcasts([]*limitedBroadcast{ + &limitedBroadcast{ + transmits: 0, + }, + &limitedBroadcast{ + transmits: 10, + }, + &limitedBroadcast{ + transmits: 3, + }, + &limitedBroadcast{ + transmits: 4, + }, + &limitedBroadcast{ + transmits: 7, + }, + }) + bc.Sort() + + if bc[0].transmits != 10 { + t.Fatalf("bad val %v", bc[0]) + } + if bc[1].transmits != 7 { + t.Fatalf("bad val %v", bc[7]) + } + if bc[2].transmits != 4 { + t.Fatalf("bad val %v", bc[2]) + } + if bc[3].transmits != 3 { + t.Fatalf("bad val %v", bc[3]) + } + if bc[4].transmits != 0 { + t.Fatalf("bad val %v", bc[4]) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/security_test.go b/vendor/github.com/hashicorp/memberlist/security_test.go new file mode 100644 index 0000000000..15fa4aa8ed --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/security_test.go @@ -0,0 +1,70 @@ +package memberlist + +import ( + "bytes" + "reflect" + "testing" +) + +func TestPKCS7(t *testing.T) { + for i := 0; i <= 255; i++ { + // Make a buffer of size i + buf := []byte{} + for j := 0; j < i; j++ { + buf = append(buf, byte(i)) + } + + // Copy to bytes buffer + inp := bytes.NewBuffer(nil) + inp.Write(buf) + + // Pad this out + pkcs7encode(inp, 0, 16) + + // Unpad + dec := pkcs7decode(inp.Bytes(), 16) + + // Ensure equivilence + if !reflect.DeepEqual(buf, dec) { + t.Fatalf("mismatch: %v %v", buf, dec) + } + } + +} + +func TestEncryptDecrypt_V0(t *testing.T) { + encryptDecryptVersioned(0, t) +} + +func TestEncryptDecrypt_V1(t *testing.T) { + encryptDecryptVersioned(1, t) +} + +func encryptDecryptVersioned(vsn encryptionVersion, t *testing.T) { + k1 := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15} + plaintext := []byte("this is a plain text message") + extra := []byte("random data") + + var buf bytes.Buffer + err := encryptPayload(vsn, k1, plaintext, extra, &buf) + if err != nil { + t.Fatalf("err: %v", err) + } + + expLen := encryptedLength(vsn, len(plaintext)) + if buf.Len() != expLen { + t.Fatalf("output length is unexpected %d %d %d", len(plaintext), buf.Len(), expLen) + } + + msg, err := decryptPayload([][]byte{k1}, buf.Bytes(), extra) + if err != nil { + t.Fatalf("err: %v", err) + } + + cmp := bytes.Compare(msg, plaintext) + if cmp != 0 { + t.Errorf("len %d %v", len(msg), msg) + t.Errorf("len %d %v", len(plaintext), plaintext) + t.Fatalf("encrypt/decrypt failed! 
%d '%s' '%s'", cmp, msg, plaintext) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/state_test.go b/vendor/github.com/hashicorp/memberlist/state_test.go new file mode 100644 index 0000000000..71e93ca4e4 --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/state_test.go @@ -0,0 +1,1900 @@ +package memberlist + +import ( + "bytes" + "fmt" + "net" + "testing" + "time" +) + +func HostMemberlist(host string, t *testing.T, f func(*Config)) *Memberlist { + c := DefaultLANConfig() + c.Name = host + c.BindAddr = host + if f != nil { + f(c) + } + + m, err := newMemberlist(c) + if err != nil { + t.Fatalf("failed to get memberlist: %s", err) + } + return m +} + +func TestMemberList_Probe(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = time.Millisecond + c.ProbeInterval = 10 * time.Millisecond + }) + m2 := HostMemberlist(addr2.String(), t, nil) + + a1 := alive{ + Node: addr1.String(), + Addr: []byte(addr1), + Port: uint16(m1.config.BindPort), + Incarnation: 1, + } + m1.aliveNode(&a1, nil, true) + a2 := alive{ + Node: addr2.String(), + Addr: []byte(addr2), + Port: uint16(m2.config.BindPort), + Incarnation: 1, + } + m1.aliveNode(&a2, nil, false) + + // should ping addr2 + m1.probe() + + // Should not be marked suspect + n := m1.nodeMap[addr2.String()] + if n.State != stateAlive { + t.Fatalf("Expect node to be alive") + } + + // Should increment seqno + if m1.sequenceNum != 1 { + t.Fatalf("bad seqno %v", m2.sequenceNum) + } +} + +func TestMemberList_ProbeNode_Suspect(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = time.Millisecond + c.ProbeInterval = 10 * time.Millisecond + }) + m2 := HostMemberlist(addr2.String(), t, nil) + m3 := HostMemberlist(addr3.String(), t, nil) + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1} + m1.aliveNode(&a3, nil, false) + a4 := alive{Node: addr4.String(), Addr: ip4, Port: 7946, Incarnation: 1} + m1.aliveNode(&a4, nil, false) + + n := m1.nodeMap[addr4.String()] + m1.probeNode(n) + + // Should be marked suspect. + if n.State != stateSuspect { + t.Fatalf("Expect node to be suspect") + } + time.Sleep(10 * time.Millisecond) + + // One of the peers should have attempted an indirect probe. 
+ if m2.sequenceNum != 1 && m3.sequenceNum != 1 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } +} + +func TestMemberList_ProbeNode_Suspect_Dogpile(t *testing.T) { + cases := []struct { + numPeers int + confirmations int + expected time.Duration + }{ + {1, 0, 500 * time.Millisecond}, // n=2, k=3 (max timeout disabled) + {2, 0, 500 * time.Millisecond}, // n=3, k=3 + {3, 0, 500 * time.Millisecond}, // n=4, k=3 + {4, 0, 1000 * time.Millisecond}, // n=5, k=3 (max timeout starts to take effect) + {5, 0, 1000 * time.Millisecond}, // n=6, k=3 + {5, 1, 750 * time.Millisecond}, // n=6, k=3 (confirmations start to lower timeout) + {5, 2, 604 * time.Millisecond}, // n=6, k=3 + {5, 3, 500 * time.Millisecond}, // n=6, k=3 (timeout driven to nominal value) + {5, 4, 500 * time.Millisecond}, // n=6, k=3 + } + for i, c := range cases { + // Create the main memberlist under test. + addr := getBindAddr() + m := HostMemberlist(addr.String(), t, func(c *Config) { + c.ProbeTimeout = time.Millisecond + c.ProbeInterval = 100 * time.Millisecond + c.SuspicionMult = 5 + c.SuspicionMaxTimeoutMult = 2 + }) + a := alive{Node: addr.String(), Addr: []byte(addr), Port: 7946, Incarnation: 1} + m.aliveNode(&a, nil, true) + + // Make all but one peer be an real, alive instance. + var peers []*Memberlist + for j := 0; j < c.numPeers-1; j++ { + peerAddr := getBindAddr() + peers = append(peers, HostMemberlist(peerAddr.String(), t, nil)) + a = alive{Node: peerAddr.String(), Addr: []byte(peerAddr), Port: 7946, Incarnation: 1} + m.aliveNode(&a, nil, false) + } + + // Just use a bogus address for the last peer so it doesn't respond + // to pings, but tell the memberlist it's alive. + badPeerAddr := getBindAddr() + a = alive{Node: badPeerAddr.String(), Addr: []byte(badPeerAddr), Port: 7946, Incarnation: 1} + m.aliveNode(&a, nil, false) + + // Force a probe, which should start us into the suspect state. + n := m.nodeMap[badPeerAddr.String()] + m.probeNode(n) + if n.State != stateSuspect { + t.Fatalf("case %d: expected node to be suspect", i) + } + + // Add the requested number of confirmations. + for j := 0; j < c.confirmations; j++ { + from := fmt.Sprintf("peer%d", j) + s := suspect{Node: badPeerAddr.String(), Incarnation: 1, From: from} + m.suspectNode(&s) + } + + // Wait until right before the timeout and make sure the timer + // hasn't fired. + fudge := 25 * time.Millisecond + time.Sleep(c.expected - fudge) + if n.State != stateSuspect { + t.Fatalf("case %d: expected node to still be suspect", i) + } + + // Wait through the timeout and a little after to make sure the + // timer fires. 
+ time.Sleep(2 * fudge) + if n.State != stateDead { + t.Fatalf("case %d: expected node to be dead", i) + } + } +} + +/* +func TestMemberList_ProbeNode_FallbackTCP(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + var probeTimeMax time.Duration + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + probeTimeMax = c.ProbeInterval + 20*time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, nil) + defer m2.Shutdown() + + m3 := HostMemberlist(addr3.String(), t, nil) + defer m3.Shutdown() + + m4 := HostMemberlist(addr4.String(), t, nil) + defer m4.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1} + m1.aliveNode(&a3, nil, false) + + // Make sure m4 is configured with the same protocol version as m1 so + // the TCP fallback behavior is enabled. + a4 := alive{ + Node: addr4.String(), + Addr: ip4, + Port: 7946, + Incarnation: 1, + Vsn: []uint8{ + ProtocolVersionMin, + ProtocolVersionMax, + m1.config.ProtocolVersion, + m1.config.DelegateProtocolMin, + m1.config.DelegateProtocolMax, + m1.config.DelegateProtocolVersion, + }, + } + m1.aliveNode(&a4, nil, false) + + // Isolate m4 from UDP traffic by re-opening its listener on the wrong + // port. This should force the TCP fallback path to be used. + var err error + if err = m4.udpListener.Close(); err != nil { + t.Fatalf("err: %v", err) + } + udpAddr := &net.UDPAddr{IP: ip4, Port: 9999} + if m4.udpListener, err = net.ListenUDP("udp", udpAddr); err != nil { + t.Fatalf("err: %v", err) + } + + // Have node m1 probe m4. + n := m1.nodeMap[addr4.String()] + startProbe := time.Now() + m1.probeNode(n) + probeTime := time.Now().Sub(startProbe) + + // Should be marked alive because of the TCP fallback ping. + if n.State != stateAlive { + t.Fatalf("expect node to be alive") + } + + // Make sure TCP activity completed in a timely manner. + if probeTime > probeTimeMax { + t.Fatalf("took to long to probe, %9.6f", probeTime.Seconds()) + } + + // Confirm at least one of the peers attempted an indirect probe. + time.Sleep(probeTimeMax) + if m2.sequenceNum != 1 && m3.sequenceNum != 1 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } + + // Now shutdown all inbound TCP traffic to make sure the TCP fallback + // path properly fails when the node is really unreachable. + if err = m4.tcpListener.Close(); err != nil { + t.Fatalf("err: %v", err) + } + tcpAddr := &net.TCPAddr{IP: ip4, Port: 9999} + if m4.tcpListener, err = net.ListenTCP("tcp", tcpAddr); err != nil { + t.Fatalf("err: %v", err) + } + + // Probe again, this time there should be no contact. + startProbe = time.Now() + m1.probeNode(n) + probeTime = time.Now().Sub(startProbe) + + // Node should be reported suspect. + if n.State != stateSuspect { + t.Fatalf("expect node to be suspect") + } + + // Make sure TCP activity didn't cause us to wait too long before + // timing out. + if probeTime > probeTimeMax { + t.Fatalf("took to long to probe, %9.6f", probeTime.Seconds()) + } + + // Confirm at least one of the peers attempted an indirect probe. 
+ time.Sleep(probeTimeMax) + if m2.sequenceNum != 2 && m3.sequenceNum != 2 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } +} + +func TestMemberList_ProbeNode_FallbackTCP_Disabled(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + var probeTimeMax time.Duration + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + probeTimeMax = c.ProbeInterval + 20*time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, nil) + defer m2.Shutdown() + + m3 := HostMemberlist(addr3.String(), t, nil) + defer m3.Shutdown() + + m4 := HostMemberlist(addr4.String(), t, nil) + defer m4.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1} + m1.aliveNode(&a3, nil, false) + + // Make sure m4 is configured with the same protocol version as m1 so + // the TCP fallback behavior is enabled. + a4 := alive{ + Node: addr4.String(), + Addr: ip4, + Port: 7946, + Incarnation: 1, + Vsn: []uint8{ + ProtocolVersionMin, + ProtocolVersionMax, + m1.config.ProtocolVersion, + m1.config.DelegateProtocolMin, + m1.config.DelegateProtocolMax, + m1.config.DelegateProtocolVersion, + }, + } + m1.aliveNode(&a4, nil, false) + + // Isolate m4 from UDP traffic by re-opening its listener on the wrong + // port. This should force the TCP fallback path to be used. + var err error + if err = m4.udpListener.Close(); err != nil { + t.Fatalf("err: %v", err) + } + udpAddr := &net.UDPAddr{IP: ip4, Port: 9999} + if m4.udpListener, err = net.ListenUDP("udp", udpAddr); err != nil { + t.Fatalf("err: %v", err) + } + + // Disable the TCP pings using the config mechanism. + m1.config.DisableTcpPings = true + + // Have node m1 probe m4. + n := m1.nodeMap[addr4.String()] + startProbe := time.Now() + m1.probeNode(n) + probeTime := time.Now().Sub(startProbe) + + // Node should be reported suspect. + if n.State != stateSuspect { + t.Fatalf("expect node to be suspect") + } + + // Make sure TCP activity didn't cause us to wait too long before + // timing out. + if probeTime > probeTimeMax { + t.Fatalf("took to long to probe, %9.6f", probeTime.Seconds()) + } + + // Confirm at least one of the peers attempted an indirect probe. 
+ time.Sleep(probeTimeMax) + if m2.sequenceNum != 1 && m3.sequenceNum != 1 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } +} + +func TestMemberList_ProbeNode_FallbackTCP_OldProtocol(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + var probeTimeMax time.Duration + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + probeTimeMax = c.ProbeInterval + 20*time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, nil) + defer m2.Shutdown() + + m3 := HostMemberlist(addr3.String(), t, nil) + defer m3.Shutdown() + + m4 := HostMemberlist(addr4.String(), t, nil) + defer m4.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1} + m1.aliveNode(&a3, nil, false) + + // Set up m4 so that it doesn't understand a version of the protocol + // that supports TCP pings. + a4 := alive{ + Node: addr4.String(), + Addr: ip4, + Port: 7946, + Incarnation: 1, + Vsn: []uint8{ + ProtocolVersionMin, + ProtocolVersion2Compatible, + ProtocolVersion2Compatible, + m1.config.DelegateProtocolMin, + m1.config.DelegateProtocolMax, + m1.config.DelegateProtocolVersion, + }, + } + m1.aliveNode(&a4, nil, false) + + // Isolate m4 from UDP traffic by re-opening its listener on the wrong + // port. This should force the TCP fallback path to be used. + var err error + if err = m4.udpListener.Close(); err != nil { + t.Fatalf("err: %v", err) + } + udpAddr := &net.UDPAddr{IP: ip4, Port: 9999} + if m4.udpListener, err = net.ListenUDP("udp", udpAddr); err != nil { + t.Fatalf("err: %v", err) + } + + // Have node m1 probe m4. + n := m1.nodeMap[addr4.String()] + startProbe := time.Now() + m1.probeNode(n) + probeTime := time.Now().Sub(startProbe) + + // Node should be reported suspect. + if n.State != stateSuspect { + t.Fatalf("expect node to be suspect") + } + + // Make sure TCP activity didn't cause us to wait too long before + // timing out. + if probeTime > probeTimeMax { + t.Fatalf("took to long to probe, %9.6f", probeTime.Seconds()) + } + + // Confirm at least one of the peers attempted an indirect probe. 
+ time.Sleep(probeTimeMax) + if m2.sequenceNum != 1 && m3.sequenceNum != 1 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } +} +*/ + +func TestMemberList_ProbeNode_Awareness_Degraded(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + var probeTimeMin time.Duration + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + probeTimeMin = 2*c.ProbeInterval - 50*time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + }) + defer m2.Shutdown() + + m3 := HostMemberlist(addr3.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + }) + defer m3.Shutdown() + + // This will enable nacks by invoking the latest protocol version. + vsn := []uint8{ + ProtocolVersionMin, + ProtocolVersionMax, + m1.config.ProtocolVersion, + m1.config.DelegateProtocolMin, + m1.config.DelegateProtocolMax, + m1.config.DelegateProtocolVersion, + } + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a3, nil, false) + + // Node 4 never gets started. + a4 := alive{Node: addr4.String(), Addr: ip4, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a4, nil, false) + + // Start the health in a degraded state. + m1.awareness.ApplyDelta(1) + if score := m1.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } + + // Have node m1 probe m4. + n := m1.nodeMap[addr4.String()] + startProbe := time.Now() + m1.probeNode(n) + probeTime := time.Now().Sub(startProbe) + + // Node should be reported suspect. + if n.State != stateSuspect { + t.Fatalf("expect node to be suspect") + } + + // Make sure we timed out approximately on time (note that we accounted + // for the slowed-down failure detector in the probeTimeMin calculation. + if probeTime < probeTimeMin { + t.Fatalf("probed too quickly, %9.6f", probeTime.Seconds()) + } + + // Confirm at least one of the peers attempted an indirect probe. + if m2.sequenceNum != 1 && m3.sequenceNum != 1 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } + + // We should have gotten all the nacks, so our score should remain the + // same, since we didn't get a successful probe. + if score := m1.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } +} + +func TestMemberList_ProbeNode_Awareness_Improved(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, nil) + defer m2.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + + // Start the health in a degraded state. 
+ m1.awareness.ApplyDelta(1) + if score := m1.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } + + // Have node m1 probe m2. + n := m1.nodeMap[addr2.String()] + m1.probeNode(n) + + // Node should be reported alive. + if n.State != stateAlive { + t.Fatalf("expect node to be alive") + } + + // Our score should have improved since we did a good probe. + if score := m1.GetHealthScore(); score != 0 { + t.Fatalf("bad: %d", score) + } +} + +func TestMemberList_ProbeNode_Awareness_MissedNack(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + var probeTimeMax time.Duration + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + probeTimeMax = c.ProbeInterval + 50*time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + }) + defer m2.Shutdown() + + // This will enable nacks by invoking the latest protocol version. + vsn := []uint8{ + ProtocolVersionMin, + ProtocolVersionMax, + m1.config.ProtocolVersion, + m1.config.DelegateProtocolMin, + m1.config.DelegateProtocolMax, + m1.config.DelegateProtocolVersion, + } + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a2, nil, false) + + // Node 3 and node 4 never get started. + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a3, nil, false) + a4 := alive{Node: addr4.String(), Addr: ip4, Port: 7946, Incarnation: 1, Vsn: vsn} + m1.aliveNode(&a4, nil, false) + + // Make sure health looks good. + if score := m1.GetHealthScore(); score != 0 { + t.Fatalf("bad: %d", score) + } + + // Have node m1 probe m4. + n := m1.nodeMap[addr4.String()] + startProbe := time.Now() + m1.probeNode(n) + probeTime := time.Now().Sub(startProbe) + + // Node should be reported suspect. + if n.State != stateSuspect { + t.Fatalf("expect node to be suspect") + } + + // Make sure we timed out approximately on time. + if probeTime > probeTimeMax { + t.Fatalf("took too long to probe, %9.6f", probeTime.Seconds()) + } + + // We should have gotten dinged for the missed nack.
+ time.Sleep(probeTimeMax) + if score := m1.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } +} + +func TestMemberList_ProbeNode_Awareness_OldProtocol(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + addr3 := getBindAddr() + addr4 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + ip3 := []byte(addr3) + ip4 := []byte(addr4) + + var probeTimeMax time.Duration + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = 10 * time.Millisecond + c.ProbeInterval = 200 * time.Millisecond + probeTimeMax = c.ProbeInterval + 20*time.Millisecond + }) + defer m1.Shutdown() + + m2 := HostMemberlist(addr2.String(), t, nil) + defer m2.Shutdown() + + m3 := HostMemberlist(addr3.String(), t, nil) + defer m3.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: addr3.String(), Addr: ip3, Port: 7946, Incarnation: 1} + m1.aliveNode(&a3, nil, false) + + // Node 4 never gets started. + a4 := alive{Node: addr4.String(), Addr: ip4, Port: 7946, Incarnation: 1} + m1.aliveNode(&a4, nil, false) + + // Make sure health looks good. + if score := m1.GetHealthScore(); score != 0 { + t.Fatalf("bad: %d", score) + } + + // Have node m1 probe m4. + n := m1.nodeMap[addr4.String()] + startProbe := time.Now() + m1.probeNode(n) + probeTime := time.Now().Sub(startProbe) + + // Node should be reported suspect. + if n.State != stateSuspect { + t.Fatalf("expect node to be suspect") + } + + // Make sure we timed out approximately on time. + if probeTime > probeTimeMax { + t.Fatalf("took to long to probe, %9.6f", probeTime.Seconds()) + } + + // Confirm at least one of the peers attempted an indirect probe. + time.Sleep(probeTimeMax) + if m2.sequenceNum != 1 && m3.sequenceNum != 1 { + t.Fatalf("bad seqnos %v, %v", m2.sequenceNum, m3.sequenceNum) + } + + // Since we are using the old protocol here, we should have gotten dinged + // for a failed health check. + if score := m1.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } +} + +func TestMemberList_ProbeNode_Buddy(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = time.Millisecond + c.ProbeInterval = 10 * time.Millisecond + }) + m2 := HostMemberlist(addr2.String(), t, nil) + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + + m1.aliveNode(&a1, nil, true) + m1.aliveNode(&a2, nil, false) + m2.aliveNode(&a2, nil, true) + + // Force the state to suspect so we piggyback a suspect message with the ping. + // We should see this get refuted later, and the ping will succeed. + n := m1.nodeMap[addr2.String()] + n.State = stateSuspect + m1.probeNode(n) + + // Make sure a ping was sent. + if m1.sequenceNum != 1 { + t.Fatalf("bad seqno %v", m1.sequenceNum) + } + + // Check a broadcast is queued. + if num := m2.broadcasts.NumQueued(); num != 1 { + t.Fatalf("expected only one queued message: %d", num) + } + + // Should be alive msg. 
+ if messageType(m2.broadcasts.bcQueue[0].b.Message()[0]) != aliveMsg { + t.Fatalf("expected queued alive msg") + } +} + +func TestMemberList_ProbeNode(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = time.Millisecond + c.ProbeInterval = 10 * time.Millisecond + }) + _ = HostMemberlist(addr2.String(), t, nil) + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + + n := m1.nodeMap[addr2.String()] + m1.probeNode(n) + + // Should be marked alive + if n.State != stateAlive { + t.Fatalf("Expect node to be alive") + } + + // Should increment seqno + if m1.sequenceNum != 1 { + t.Fatalf("bad seqno %v", m1.sequenceNum) + } +} + +func TestMemberList_Ping(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.ProbeTimeout = time.Millisecond + c.ProbeInterval = 10 * time.Second + }) + _ = HostMemberlist(addr2.String(), t, nil) + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + + // Do a legit ping. + n := m1.nodeMap[addr2.String()] + addr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(addr2.String(), "7946")) + if err != nil { + t.Fatalf("err: %v", err) + } + rtt, err := m1.Ping(n.Name, addr) + if err != nil { + t.Fatalf("err: %v", err) + } + if !(rtt > 0) { + t.Fatalf("bad: %v", rtt) + } + + // This ping has a bad node name so should timeout. 
+ _, err = m1.Ping("bad", addr) + if _, ok := err.(NoPingResponseError); !ok || err == nil { + t.Fatalf("bad: %v", err) + } +} + +func TestMemberList_ResetNodes(t *testing.T) { + m := GetMemberlist(t) + a1 := alive{Node: "test1", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a1, nil, false) + a2 := alive{Node: "test2", Addr: []byte{127, 0, 0, 2}, Incarnation: 1} + m.aliveNode(&a2, nil, false) + a3 := alive{Node: "test3", Addr: []byte{127, 0, 0, 3}, Incarnation: 1} + m.aliveNode(&a3, nil, false) + d := dead{Node: "test2", Incarnation: 1} + m.deadNode(&d) + + m.config.GossipToTheDeadTime = 100 * time.Millisecond + m.resetNodes() + if len(m.nodes) != 3 { + t.Fatalf("Bad length") + } + if _, ok := m.nodeMap["test2"]; !ok { + t.Fatalf("test2 should not be unmapped") + } + + time.Sleep(200 * time.Millisecond) + m.resetNodes() + if len(m.nodes) != 2 { + t.Fatalf("Bad length") + } + if _, ok := m.nodeMap["test2"]; ok { + t.Fatalf("test2 should be unmapped") + } +} + +func TestMemberList_NextSeq(t *testing.T) { + m := &Memberlist{} + if m.nextSeqNo() != 1 { + t.Fatalf("bad sequence no") + } + if m.nextSeqNo() != 2 { + t.Fatalf("bad sequence no") + } +} + +func TestMemberList_setProbeChannels(t *testing.T) { + m := &Memberlist{ackHandlers: make(map[uint32]*ackHandler)} + + ch := make(chan ackMessage, 1) + m.setProbeChannels(0, ch, nil, 10*time.Millisecond) + + if _, ok := m.ackHandlers[0]; !ok { + t.Fatalf("missing handler") + } + time.Sleep(20 * time.Millisecond) + + if _, ok := m.ackHandlers[0]; ok { + t.Fatalf("non-reaped handler") + } +} + +func TestMemberList_setAckHandler(t *testing.T) { + m := &Memberlist{ackHandlers: make(map[uint32]*ackHandler)} + + f := func([]byte, time.Time) {} + m.setAckHandler(0, f, 10*time.Millisecond) + + if _, ok := m.ackHandlers[0]; !ok { + t.Fatalf("missing handler") + } + time.Sleep(20 * time.Millisecond) + + if _, ok := m.ackHandlers[0]; ok { + t.Fatalf("non-reaped handler") + } +} + +func TestMemberList_invokeAckHandler(t *testing.T) { + m := &Memberlist{ackHandlers: make(map[uint32]*ackHandler)} + + // Does nothing + m.invokeAckHandler(ackResp{}, time.Now()) + + var b bool + f := func(payload []byte, timestamp time.Time) { b = true } + m.setAckHandler(0, f, 10*time.Millisecond) + + // Should set b + m.invokeAckHandler(ackResp{0, nil}, time.Now()) + if !b { + t.Fatalf("b not set") + } + + if _, ok := m.ackHandlers[0]; ok { + t.Fatalf("non-reaped handler") + } +} + +func TestMemberList_invokeAckHandler_Channel_Ack(t *testing.T) { + m := &Memberlist{ackHandlers: make(map[uint32]*ackHandler)} + + ack := ackResp{0, []byte{0, 0, 0}} + + // Does nothing + m.invokeAckHandler(ack, time.Now()) + + ackCh := make(chan ackMessage, 1) + nackCh := make(chan struct{}, 1) + m.setProbeChannels(0, ackCh, nackCh, 10*time.Millisecond) + + // Should send message + m.invokeAckHandler(ack, time.Now()) + + select { + case v := <-ackCh: + if v.Complete != true { + t.Fatalf("Bad value") + } + if bytes.Compare(v.Payload, ack.Payload) != 0 { + t.Fatalf("wrong payload. expected: %v; actual: %v", ack.Payload, v.Payload) + } + + case <-nackCh: + t.Fatalf("should not get a nack") + + default: + t.Fatalf("message not sent") + } + + if _, ok := m.ackHandlers[0]; ok { + t.Fatalf("non-reaped handler") + } +} + +func TestMemberList_invokeAckHandler_Channel_Nack(t *testing.T) { + m := &Memberlist{ackHandlers: make(map[uint32]*ackHandler)} + + nack := nackResp{0} + + // Does nothing. 
+ m.invokeNackHandler(nack) + + ackCh := make(chan ackMessage, 1) + nackCh := make(chan struct{}, 1) + m.setProbeChannels(0, ackCh, nackCh, 10*time.Millisecond) + + // Should send message. + m.invokeNackHandler(nack) + + select { + case <-ackCh: + t.Fatalf("should not get an ack") + + case <-nackCh: + // Good. + + default: + t.Fatalf("message not sent") + } + + // Getting a nack doesn't reap the handler so that we can still forward + // an ack up to the reap time, if we get one. + if _, ok := m.ackHandlers[0]; !ok { + t.Fatalf("handler should not be reaped") + } + + ack := ackResp{0, []byte{0, 0, 0}} + m.invokeAckHandler(ack, time.Now()) + + select { + case v := <-ackCh: + if v.Complete != true { + t.Fatalf("Bad value") + } + if bytes.Compare(v.Payload, ack.Payload) != 0 { + t.Fatalf("wrong payload. expected: %v; actual: %v", ack.Payload, v.Payload) + } + + case <-nackCh: + t.Fatalf("should not get a nack") + + default: + t.Fatalf("message not sent") + } + + if _, ok := m.ackHandlers[0]; ok { + t.Fatalf("non-reaped handler") + } +} + +func TestMemberList_AliveNode_NewNode(t *testing.T) { + ch := make(chan NodeEvent, 1) + m := GetMemberlist(t) + m.config.Events = &ChannelEventDelegate{ch} + + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + if len(m.nodes) != 1 { + t.Fatalf("should add node") + } + + state, ok := m.nodeMap["test"] + if !ok { + t.Fatalf("should map node") + } + + if state.Incarnation != 1 { + t.Fatalf("bad incarnation") + } + if state.State != stateAlive { + t.Fatalf("bad state") + } + if time.Now().Sub(state.StateChange) > time.Second { + t.Fatalf("bad change delta") + } + + // Check for a join message + select { + case e := <-ch: + if e.Node.Name != "test" { + t.Fatalf("bad node name") + } + default: + t.Fatalf("no join message") + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected queued message") + } +} + +func TestMemberList_AliveNode_SuspectNode(t *testing.T) { + ch := make(chan NodeEvent, 1) + m := GetMemberlist(t) + + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + // Listen only after first join + m.config.Events = &ChannelEventDelegate{ch} + + // Make suspect + state := m.nodeMap["test"] + state.State = stateSuspect + state.StateChange = state.StateChange.Add(-time.Hour) + + // Old incarnation number, should not change + m.aliveNode(&a, nil, false) + if state.State != stateSuspect { + t.Fatalf("update with old incarnation!") + } + + // Should reset to alive now + a.Incarnation = 2 + m.aliveNode(&a, nil, false) + if state.State != stateAlive { + t.Fatalf("no update with new incarnation!") + } + + if time.Now().Sub(state.StateChange) > time.Second { + t.Fatalf("bad change delta") + } + + // Check for a no join message + select { + case <-ch: + t.Fatalf("got bad join message") + default: + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected queued message") + } +} + +func TestMemberList_AliveNode_Idempotent(t *testing.T) { + ch := make(chan NodeEvent, 1) + m := GetMemberlist(t) + + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + // Listen only after first join + m.config.Events = &ChannelEventDelegate{ch} + + // Make suspect + state := m.nodeMap["test"] + stateTime := state.StateChange + + // Should reset to alive now + a.Incarnation = 2 + m.aliveNode(&a, nil, false) + if state.State != stateAlive { + t.Fatalf("non 
idempotent") + } + + if stateTime != state.StateChange { + t.Fatalf("should not change state") + } + + // Check for a no join message + select { + case <-ch: + t.Fatalf("got bad join message") + default: + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected only one queued message") + } +} + +// Serf Bug: GH-58, Meta data does not update +func TestMemberList_AliveNode_ChangeMeta(t *testing.T) { + ch := make(chan NodeEvent, 1) + m := GetMemberlist(t) + + a := alive{ + Node: "test", + Addr: []byte{127, 0, 0, 1}, + Meta: []byte("val1"), + Incarnation: 1} + m.aliveNode(&a, nil, false) + + // Listen only after first join + m.config.Events = &ChannelEventDelegate{ch} + + // Make suspect + state := m.nodeMap["test"] + + // Should reset to alive now + a.Incarnation = 2 + a.Meta = []byte("val2") + m.aliveNode(&a, nil, false) + + // Check updates + if bytes.Compare(state.Meta, a.Meta) != 0 { + t.Fatalf("meta did not update") + } + + // Check for a NotifyUpdate + select { + case e := <-ch: + if e.Event != NodeUpdate { + t.Fatalf("bad event: %v", e) + } + if e.Node != &state.Node { + t.Fatalf("bad event: %v", e) + } + if bytes.Compare(e.Node.Meta, a.Meta) != 0 { + t.Fatalf("meta did not update") + } + default: + t.Fatalf("missing event!") + } + +} + +func TestMemberList_AliveNode_Refute(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: m.config.Name, Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, true) + + // Clear queue + m.broadcasts.Reset() + + // Conflicting alive + s := alive{ + Node: m.config.Name, + Addr: []byte{127, 0, 0, 1}, + Incarnation: 2, + Meta: []byte("foo"), + } + m.aliveNode(&s, nil, false) + + state := m.nodeMap[m.config.Name] + if state.State != stateAlive { + t.Fatalf("should still be alive") + } + if state.Meta != nil { + t.Fatalf("meta should still be nil") + } + + // Check a broad cast is queued + if num := m.broadcasts.NumQueued(); num != 1 { + t.Fatalf("expected only one queued message: %d", + num) + } + + // Should be alive mesg + if messageType(m.broadcasts.bcQueue[0].b.Message()[0]) != aliveMsg { + t.Fatalf("expected queued alive msg") + } +} + +func TestMemberList_SuspectNode_NoNode(t *testing.T) { + m := GetMemberlist(t) + s := suspect{Node: "test", Incarnation: 1} + m.suspectNode(&s) + if len(m.nodes) != 0 { + t.Fatalf("don't expect nodes") + } +} + +func TestMemberList_SuspectNode(t *testing.T) { + m := GetMemberlist(t) + m.config.ProbeInterval = time.Millisecond + m.config.SuspicionMult = 1 + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + state := m.nodeMap["test"] + state.StateChange = state.StateChange.Add(-time.Hour) + + s := suspect{Node: "test", Incarnation: 1} + m.suspectNode(&s) + + if state.State != stateSuspect { + t.Fatalf("Bad state") + } + + change := state.StateChange + if time.Now().Sub(change) > time.Second { + t.Fatalf("bad change delta") + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected only one queued message") + } + + // Check its a suspect message + if messageType(m.broadcasts.bcQueue[0].b.Message()[0]) != suspectMsg { + t.Fatalf("expected queued suspect msg") + } + + // Wait for the timeout + time.Sleep(10 * time.Millisecond) + + if state.State != stateDead { + t.Fatalf("Bad state") + } + + if time.Now().Sub(state.StateChange) > time.Second { + t.Fatalf("bad change delta") + } + if !state.StateChange.After(change) { + t.Fatalf("should increment time") + } + + // Check a 
broadcast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected only one queued message") + } + + // Check it's a dead message + if messageType(m.broadcasts.bcQueue[0].b.Message()[0]) != deadMsg { + t.Fatalf("expected queued dead msg") + } +} + +func TestMemberList_SuspectNode_DoubleSuspect(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + state := m.nodeMap["test"] + state.StateChange = state.StateChange.Add(-time.Hour) + + s := suspect{Node: "test", Incarnation: 1} + m.suspectNode(&s) + + if state.State != stateSuspect { + t.Fatalf("Bad state") + } + + change := state.StateChange + if time.Now().Sub(change) > time.Second { + t.Fatalf("bad change delta") + } + + // clear the broadcast queue + m.broadcasts.Reset() + + // Suspect again + m.suspectNode(&s) + + if state.StateChange != change { + t.Fatalf("unexpected state change") + } + + // Check no broadcast is queued + if m.broadcasts.NumQueued() != 0 { + t.Fatalf("expected no queued messages") + } + +} + +func TestMemberList_SuspectNode_OldSuspect(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 10} + m.aliveNode(&a, nil, false) + + state := m.nodeMap["test"] + state.StateChange = state.StateChange.Add(-time.Hour) + + // Clear queue + m.broadcasts.Reset() + + s := suspect{Node: "test", Incarnation: 1} + m.suspectNode(&s) + + if state.State != stateAlive { + t.Fatalf("Bad state") + } + + // Check no broadcast is queued + if m.broadcasts.NumQueued() != 0 { + t.Fatalf("expected no queued messages") + } +} + +func TestMemberList_SuspectNode_Refute(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: m.config.Name, Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, true) + + // Clear queue + m.broadcasts.Reset() + + // Make sure health is in a good state + if score := m.GetHealthScore(); score != 0 { + t.Fatalf("bad: %d", score) + } + + s := suspect{Node: m.config.Name, Incarnation: 1} + m.suspectNode(&s) + + state := m.nodeMap[m.config.Name] + if state.State != stateAlive { + t.Fatalf("should still be alive") + } + + // Check a broadcast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected only one queued message") + } + + // Should be an alive msg + if messageType(m.broadcasts.bcQueue[0].b.Message()[0]) != aliveMsg { + t.Fatalf("expected queued alive msg") + } + + // Health should have been dinged + if score := m.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } +} + +func TestMemberList_DeadNode_NoNode(t *testing.T) { + m := GetMemberlist(t) + d := dead{Node: "test", Incarnation: 1} + m.deadNode(&d) + if len(m.nodes) != 0 { + t.Fatalf("don't expect nodes") + } +} + +func TestMemberList_DeadNode(t *testing.T) { + ch := make(chan NodeEvent, 1) + m := GetMemberlist(t) + m.config.Events = &ChannelEventDelegate{ch} + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + // Read the join event + <-ch + + state := m.nodeMap["test"] + state.StateChange = state.StateChange.Add(-time.Hour) + + d := dead{Node: "test", Incarnation: 1} + m.deadNode(&d) + + if state.State != stateDead { + t.Fatalf("Bad state") + } + + change := state.StateChange + if time.Now().Sub(change) > time.Second { + t.Fatalf("bad change delta") + } + + select { + case leave := <-ch: + if leave.Event != NodeLeave || leave.Node.Name != "test" { + t.Fatalf("bad node name") + } + default: + t.Fatalf("no leave
message") + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected only one queued message") + } + + // Check its a suspect message + if messageType(m.broadcasts.bcQueue[0].b.Message()[0]) != deadMsg { + t.Fatalf("expected queued dead msg") + } +} + +func TestMemberList_DeadNode_Double(t *testing.T) { + ch := make(chan NodeEvent, 1) + m := GetMemberlist(t) + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, false) + + state := m.nodeMap["test"] + state.StateChange = state.StateChange.Add(-time.Hour) + + d := dead{Node: "test", Incarnation: 1} + m.deadNode(&d) + + // Clear queue + m.broadcasts.Reset() + + // Notify after the first dead + m.config.Events = &ChannelEventDelegate{ch} + + // Should do nothing + d.Incarnation = 2 + m.deadNode(&d) + + select { + case <-ch: + t.Fatalf("should not get leave") + default: + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 0 { + t.Fatalf("expected only one queued message") + } +} + +func TestMemberList_DeadNode_OldDead(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 10} + m.aliveNode(&a, nil, false) + + state := m.nodeMap["test"] + state.StateChange = state.StateChange.Add(-time.Hour) + + d := dead{Node: "test", Incarnation: 1} + m.deadNode(&d) + + if state.State != stateAlive { + t.Fatalf("Bad state") + } +} + +func TestMemberList_DeadNode_AliveReplay(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: "test", Addr: []byte{127, 0, 0, 1}, Incarnation: 10} + m.aliveNode(&a, nil, false) + + d := dead{Node: "test", Incarnation: 10} + m.deadNode(&d) + + // Replay alive at same incarnation + m.aliveNode(&a, nil, false) + + // Should remain dead + state, ok := m.nodeMap["test"] + if ok && state.State != stateDead { + t.Fatalf("Bad state") + } +} + +func TestMemberList_DeadNode_Refute(t *testing.T) { + m := GetMemberlist(t) + a := alive{Node: m.config.Name, Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a, nil, true) + + // Clear queue + m.broadcasts.Reset() + + // Make sure health is in a good state + if score := m.GetHealthScore(); score != 0 { + t.Fatalf("bad: %d", score) + } + + d := dead{Node: m.config.Name, Incarnation: 1} + m.deadNode(&d) + + state := m.nodeMap[m.config.Name] + if state.State != stateAlive { + t.Fatalf("should still be alive") + } + + // Check a broad cast is queued + if m.broadcasts.NumQueued() != 1 { + t.Fatalf("expected only one queued message") + } + + // Should be alive mesg + if messageType(m.broadcasts.bcQueue[0].b.Message()[0]) != aliveMsg { + t.Fatalf("expected queued alive msg") + } + + // We should have been dinged + if score := m.GetHealthScore(); score != 1 { + t.Fatalf("bad: %d", score) + } +} + +func TestMemberList_MergeState(t *testing.T) { + m := GetMemberlist(t) + a1 := alive{Node: "test1", Addr: []byte{127, 0, 0, 1}, Incarnation: 1} + m.aliveNode(&a1, nil, false) + a2 := alive{Node: "test2", Addr: []byte{127, 0, 0, 2}, Incarnation: 1} + m.aliveNode(&a2, nil, false) + a3 := alive{Node: "test3", Addr: []byte{127, 0, 0, 3}, Incarnation: 1} + m.aliveNode(&a3, nil, false) + + s := suspect{Node: "test1", Incarnation: 1} + m.suspectNode(&s) + + remote := []pushNodeState{ + pushNodeState{ + Name: "test1", + Addr: []byte{127, 0, 0, 1}, + Incarnation: 2, + State: stateAlive, + }, + pushNodeState{ + Name: "test2", + Addr: []byte{127, 0, 0, 2}, + Incarnation: 1, + State: stateSuspect, + }, + pushNodeState{ + Name: "test3", + Addr: []byte{127, 0, 
0, 3}, + Incarnation: 1, + State: stateDead, + }, + pushNodeState{ + Name: "test4", + Addr: []byte{127, 0, 0, 4}, + Incarnation: 2, + State: stateAlive, + }, + } + + // Listen for changes + eventCh := make(chan NodeEvent, 1) + m.config.Events = &ChannelEventDelegate{eventCh} + + // Merge remote state + m.mergeState(remote) + + // Check the states + state := m.nodeMap["test1"] + if state.State != stateAlive || state.Incarnation != 2 { + t.Fatalf("Bad state %v", state) + } + + state = m.nodeMap["test2"] + if state.State != stateSuspect || state.Incarnation != 1 { + t.Fatalf("Bad state %v", state) + } + + state = m.nodeMap["test3"] + if state.State != stateSuspect { + t.Fatalf("Bad state %v", state) + } + + state = m.nodeMap["test4"] + if state.State != stateAlive || state.Incarnation != 2 { + t.Fatalf("Bad state %v", state) + } + + // Check the channels + select { + case e := <-eventCh: + if e.Event != NodeJoin || e.Node.Name != "test4" { + t.Fatalf("bad node %v", e) + } + default: + t.Fatalf("Expect join") + } + + select { + case e := <-eventCh: + t.Fatalf("Unexpect event: %v", e) + default: + } +} + +func TestMemberlist_Gossip(t *testing.T) { + ch := make(chan NodeEvent, 3) + + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.GossipInterval = time.Millisecond + }) + m2 := HostMemberlist(addr2.String(), t, func(c *Config) { + c.Events = &ChannelEventDelegate{ch} + c.GossipInterval = time.Millisecond + }) + + defer m1.Shutdown() + defer m2.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + a3 := alive{Node: "172.0.0.1", Addr: []byte{172, 0, 0, 1}, Incarnation: 1} + m1.aliveNode(&a3, nil, false) + + // Gossip should send all this to m2 + m1.gossip() + + for i := 0; i < 3; i++ { + select { + case <-ch: + case <-time.After(50 * time.Millisecond): + t.Fatalf("timeout") + } + } +} + +func TestMemberlist_GossipToDead(t *testing.T) { + ch := make(chan NodeEvent, 2) + + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 := []byte(addr1) + ip2 := []byte(addr2) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.GossipInterval = time.Millisecond + c.GossipToTheDeadTime = 100 * time.Millisecond + }) + m2 := HostMemberlist(addr2.String(), t, func(c *Config) { + c.Events = &ChannelEventDelegate{ch} + }) + + defer m1.Shutdown() + defer m2.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + + // Shouldn't send anything to m2 here, node has been dead for 2x the GossipToTheDeadTime + m1.nodeMap[addr2.String()].State = stateDead + m1.nodeMap[addr2.String()].StateChange = time.Now().Add(-200 * time.Millisecond) + m1.gossip() + + select { + case <-ch: + t.Fatalf("shouldn't get gossip") + case <-time.After(50 * time.Millisecond): + } + + // Should gossip to m2 because its state has changed within GossipToTheDeadTime + m1.nodeMap[addr2.String()].StateChange = time.Now().Add(-20 * time.Millisecond) + m1.gossip() + + for i := 0; i < 2; i++ { + select { + case <-ch: + case <-time.After(50 * time.Millisecond): + t.Fatalf("timeout") + } + } +} + +func TestMemberlist_PushPull(t *testing.T) { + addr1 := getBindAddr() + addr2 := getBindAddr() + ip1 
:= []byte(addr1) + ip2 := []byte(addr2) + + ch := make(chan NodeEvent, 3) + + m1 := HostMemberlist(addr1.String(), t, func(c *Config) { + c.GossipInterval = 10 * time.Second + c.PushPullInterval = time.Millisecond + }) + m2 := HostMemberlist(addr2.String(), t, func(c *Config) { + c.GossipInterval = 10 * time.Second + c.Events = &ChannelEventDelegate{ch} + }) + + defer m1.Shutdown() + defer m2.Shutdown() + + a1 := alive{Node: addr1.String(), Addr: ip1, Port: 7946, Incarnation: 1} + m1.aliveNode(&a1, nil, true) + a2 := alive{Node: addr2.String(), Addr: ip2, Port: 7946, Incarnation: 1} + m1.aliveNode(&a2, nil, false) + + // Gossip should send all this to m2 + m1.pushPull() + + for i := 0; i < 2; i++ { + select { + case <-ch: + case <-time.After(10 * time.Millisecond): + t.Fatalf("timeout") + } + } +} + +func TestVerifyProtocol(t *testing.T) { + cases := []struct { + Anodes [][3]uint8 + Bnodes [][3]uint8 + expected bool + }{ + // Both running identical everything + { + Anodes: [][3]uint8{ + {0, 0, 0}, + }, + Bnodes: [][3]uint8{ + {0, 0, 0}, + }, + expected: true, + }, + + // One can understand newer, but speaking same protocol + { + Anodes: [][3]uint8{ + {0, 0, 0}, + }, + Bnodes: [][3]uint8{ + {0, 1, 0}, + }, + expected: true, + }, + + // One is speaking outside the range + { + Anodes: [][3]uint8{ + {0, 0, 0}, + }, + Bnodes: [][3]uint8{ + {1, 1, 1}, + }, + expected: false, + }, + + // Transitively outside the range + { + Anodes: [][3]uint8{ + {0, 1, 0}, + {0, 2, 1}, + }, + Bnodes: [][3]uint8{ + {1, 3, 1}, + }, + expected: false, + }, + + // Multi-node + { + Anodes: [][3]uint8{ + {0, 3, 2}, + {0, 2, 0}, + }, + Bnodes: [][3]uint8{ + {0, 2, 1}, + {0, 5, 0}, + }, + expected: true, + }, + } + + for _, tc := range cases { + aCore := make([][6]uint8, len(tc.Anodes)) + aApp := make([][6]uint8, len(tc.Anodes)) + for i, n := range tc.Anodes { + aCore[i] = [6]uint8{n[0], n[1], n[2], 0, 0, 0} + aApp[i] = [6]uint8{0, 0, 0, n[0], n[1], n[2]} + } + + bCore := make([][6]uint8, len(tc.Bnodes)) + bApp := make([][6]uint8, len(tc.Bnodes)) + for i, n := range tc.Bnodes { + bCore[i] = [6]uint8{n[0], n[1], n[2], 0, 0, 0} + bApp[i] = [6]uint8{0, 0, 0, n[0], n[1], n[2]} + } + + // Test core protocol verification + testVerifyProtocolSingle(t, aCore, bCore, tc.expected) + testVerifyProtocolSingle(t, bCore, aCore, tc.expected) + + // Test app protocol verification + testVerifyProtocolSingle(t, aApp, bApp, tc.expected) + testVerifyProtocolSingle(t, bApp, aApp, tc.expected) + } +} + +func testVerifyProtocolSingle(t *testing.T, A [][6]uint8, B [][6]uint8, expect bool) { + m := GetMemberlist(t) + defer m.Shutdown() + + m.nodes = make([]*nodeState, len(A)) + for i, n := range A { + m.nodes[i] = &nodeState{ + Node: Node{ + PMin: n[0], + PMax: n[1], + PCur: n[2], + DMin: n[3], + DMax: n[4], + DCur: n[5], + }, + } + } + + remote := make([]pushNodeState, len(B)) + for i, n := range B { + remote[i] = pushNodeState{ + Name: fmt.Sprintf("node %d", i), + Vsn: []uint8{n[0], n[1], n[2], n[3], n[4], n[5]}, + } + } + + err := m.verifyProtocol(remote) + if (err == nil) != expect { + t.Fatalf("bad:\nA: %v\nB: %v\nErr: %s", A, B, err) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/suspicion_test.go b/vendor/github.com/hashicorp/memberlist/suspicion_test.go new file mode 100644 index 0000000000..1b5ca8a5ac --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/suspicion_test.go @@ -0,0 +1,198 @@ +package memberlist + +import ( + "testing" + "time" +) + +func TestSuspicion_remainingSuspicionTime(t *testing.T) { + cases := 
[]struct { + n int32 + k int32 + elapsed time.Duration + min time.Duration + max time.Duration + expected time.Duration + }{ + {0, 3, 0, 2 * time.Second, 30 * time.Second, 30 * time.Second}, + {1, 3, 2 * time.Second, 2 * time.Second, 30 * time.Second, 14 * time.Second}, + {2, 3, 3 * time.Second, 2 * time.Second, 30 * time.Second, 4810 * time.Millisecond}, + {3, 3, 4 * time.Second, 2 * time.Second, 30 * time.Second, -2 * time.Second}, + {4, 3, 5 * time.Second, 2 * time.Second, 30 * time.Second, -3 * time.Second}, + {5, 3, 10 * time.Second, 2 * time.Second, 30 * time.Second, -8 * time.Second}, + } + for i, c := range cases { + remaining := remainingSuspicionTime(c.n, c.k, c.elapsed, c.min, c.max) + if remaining != c.expected { + t.Errorf("case %d: remaining %9.6f != expected %9.6f", i, remaining.Seconds(), c.expected.Seconds()) + } + } +} + +func TestSuspicion_Timer(t *testing.T) { + const k = 3 + const min = 500 * time.Millisecond + const max = 2 * time.Second + + type pair struct { + from string + newInfo bool + } + cases := []struct { + numConfirmations int + from string + confirmations []pair + expected time.Duration + }{ + { + 0, + "me", + []pair{}, + max, + }, + { + 1, + "me", + []pair{ + pair{"me", false}, + pair{"foo", true}, + }, + 1250 * time.Millisecond, + }, + { + 1, + "me", + []pair{ + pair{"me", false}, + pair{"foo", true}, + pair{"foo", false}, + pair{"foo", false}, + }, + 1250 * time.Millisecond, + }, + { + 2, + "me", + []pair{ + pair{"me", false}, + pair{"foo", true}, + pair{"bar", true}, + }, + 810 * time.Millisecond, + }, + { + 3, + "me", + []pair{ + pair{"me", false}, + pair{"foo", true}, + pair{"bar", true}, + pair{"baz", true}, + }, + min, + }, + { + 3, + "me", + []pair{ + pair{"me", false}, + pair{"foo", true}, + pair{"bar", true}, + pair{"baz", true}, + pair{"zoo", false}, + }, + min, + }, + } + for i, c := range cases { + ch := make(chan time.Duration, 1) + start := time.Now() + f := func(numConfirmations int) { + if numConfirmations != c.numConfirmations { + t.Errorf("case %d: bad %d != %d", i, numConfirmations, c.numConfirmations) + } + + ch <- time.Now().Sub(start) + } + + // Create the timer and add the requested confirmations. Wait + // the fudge amount to help make sure we calculate the timeout + // overall, and don't accumulate extra time. + s := newSuspicion(c.from, k, min, max, f) + fudge := 25 * time.Millisecond + for _, p := range c.confirmations { + time.Sleep(fudge) + if s.Confirm(p.from) != p.newInfo { + t.Fatalf("case %d: newInfo mismatch for %s", i, p.from) + } + } + + // Wait until right before the timeout and make sure the + // timer hasn't fired. + already := time.Duration(len(c.confirmations)) * fudge + time.Sleep(c.expected - already - fudge) + select { + case d := <-ch: + t.Fatalf("case %d: should not have fired (%9.6f)", i, d.Seconds()) + default: + } + + // Wait through the timeout and a little after and make sure it + // fires. + time.Sleep(2 * fudge) + select { + case <-ch: + default: + t.Fatalf("case %d: should have fired", i) + } + + // Confirm after to make sure it handles a negative remaining + // time correctly and doesn't fire again. + s.Confirm("late") + time.Sleep(c.expected + 2*fudge) + select { + case d := <-ch: + t.Fatalf("case %d: should not have fired (%9.6f)", i, d.Seconds()) + default: + } + } +} + +func TestSuspicion_Timer_ZeroK(t *testing.T) { + ch := make(chan struct{}, 1) + f := func(int) { + ch <- struct{}{} + } + + // This should select the min time since there are no expected + // confirmations to accelerate the timer. 
+ s := newSuspicion("me", 0, 25*time.Millisecond, 30*time.Second, f) + if s.Confirm("foo") { + t.Fatalf("should not provide new information") + } + + select { + case <-ch: + case <-time.After(50 * time.Millisecond): + t.Fatalf("should have fired") + } +} + +func TestSuspicion_Timer_Immediate(t *testing.T) { + ch := make(chan struct{}, 1) + f := func(int) { + ch <- struct{}{} + } + + // This should underflow the timeout and fire immediately. + s := newSuspicion("me", 1, 100*time.Millisecond, 30*time.Second, f) + time.Sleep(200 * time.Millisecond) + s.Confirm("foo") + + // Wait a little while since the function gets called in a goroutine. + select { + case <-ch: + case <-time.After(25 * time.Millisecond): + t.Fatalf("should have fired") + } +} diff --git a/vendor/github.com/hashicorp/memberlist/transport_test.go b/vendor/github.com/hashicorp/memberlist/transport_test.go new file mode 100644 index 0000000000..b5249eb5fe --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/transport_test.go @@ -0,0 +1,124 @@ +package memberlist + +import ( + "bytes" + "testing" + "time" +) + +func TestTransport_Join(t *testing.T) { + net := &MockNetwork{} + + t1 := net.NewTransport() + + c1 := DefaultLANConfig() + c1.Name = "node1" + c1.Transport = t1 + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %v", err) + } + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + c2 := DefaultLANConfig() + c2.Name = "node2" + c2.Transport = net.NewTransport() + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %v", err) + } + m2.setAlive() + m2.schedule() + defer m2.Shutdown() + + num, err := m2.Join([]string{t1.addr.String()}) + if num != 1 { + t.Fatalf("bad: %d", num) + } + if err != nil { + t.Fatalf("err: %v", err) + } + + if len(m2.Members()) != 2 { + t.Fatalf("bad: %v", m2.Members()) + } + if m2.estNumNodes() != 2 { + t.Fatalf("bad: %v", m2.Members()) + } + +} + +func TestTransport_Send(t *testing.T) { + net := &MockNetwork{} + + t1 := net.NewTransport() + d1 := &MockDelegate{} + + c1 := DefaultLANConfig() + c1.Name = "node1" + c1.Transport = t1 + c1.Delegate = d1 + m1, err := Create(c1) + if err != nil { + t.Fatalf("err: %v", err) + } + m1.setAlive() + m1.schedule() + defer m1.Shutdown() + + c2 := DefaultLANConfig() + c2.Name = "node2" + c2.Transport = net.NewTransport() + m2, err := Create(c2) + if err != nil { + t.Fatalf("err: %v", err) + } + m2.setAlive() + m2.schedule() + defer m2.Shutdown() + + num, err := m2.Join([]string{t1.addr.String()}) + if num != 1 { + t.Fatalf("bad: %d", num) + } + if err != nil { + t.Fatalf("err: %v", err) + } + + if err := m2.SendTo(t1.addr, []byte("SendTo")); err != nil { + t.Fatalf("err: %v", err) + } + + var n1 *Node + for _, n := range m2.Members() { + if n.Name == c1.Name { + n1 = n + break + } + } + if n1 == nil { + t.Fatalf("bad") + } + + if err := m2.SendToUDP(n1, []byte("SendToUDP")); err != nil { + t.Fatalf("err: %v", err) + } + if err := m2.SendToTCP(n1, []byte("SendToTCP")); err != nil { + t.Fatalf("err: %v", err) + } + if err := m2.SendBestEffort(n1, []byte("SendBestEffort")); err != nil { + t.Fatalf("err: %v", err) + } + if err := m2.SendReliable(n1, []byte("SendReliable")); err != nil { + t.Fatalf("err: %v", err) + } + time.Sleep(100 * time.Millisecond) + + received := bytes.Join(d1.msgs, []byte("|")) + expected := []byte("SendTo|SendToUDP|SendToTCP|SendBestEffort|SendReliable") + if !bytes.Equal(received, expected) { + t.Fatalf("bad: %s", received) + } +} diff --git a/vendor/github.com/hashicorp/memberlist/util_test.go 
b/vendor/github.com/hashicorp/memberlist/util_test.go new file mode 100644 index 0000000000..b7f2b4199d --- /dev/null +++ b/vendor/github.com/hashicorp/memberlist/util_test.go @@ -0,0 +1,358 @@ +package memberlist + +import ( + "fmt" + "reflect" + "testing" + "time" +) + +func TestUtil_PortFunctions(t *testing.T) { + tests := []struct { + addr string + hasPort bool + ensurePort string + }{ + {"1.2.3.4", false, "1.2.3.4:8301"}, + {"1.2.3.4:1234", true, "1.2.3.4:1234"}, + {"2600:1f14:e22:1501:f9a:2e0c:a167:67e8", false, "[2600:1f14:e22:1501:f9a:2e0c:a167:67e8]:8301"}, + {"[2600:1f14:e22:1501:f9a:2e0c:a167:67e8]", false, "[2600:1f14:e22:1501:f9a:2e0c:a167:67e8]:8301"}, + {"[2600:1f14:e22:1501:f9a:2e0c:a167:67e8]:1234", true, "[2600:1f14:e22:1501:f9a:2e0c:a167:67e8]:1234"}, + {"localhost", false, "localhost:8301"}, + {"localhost:1234", true, "localhost:1234"}, + {"hashicorp.com", false, "hashicorp.com:8301"}, + {"hashicorp.com:1234", true, "hashicorp.com:1234"}, + } + for _, tt := range tests { + t.Run(tt.addr, func(t *testing.T) { + if got, want := hasPort(tt.addr), tt.hasPort; got != want { + t.Fatalf("got %v want %v", got, want) + } + if got, want := ensurePort(tt.addr, 8301), tt.ensurePort; got != want { + t.Fatalf("got %v want %v", got, want) + } + }) + } +} + +func TestEncodeDecode(t *testing.T) { + msg := &ping{SeqNo: 100} + buf, err := encode(pingMsg, msg) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + var out ping + if err := decode(buf.Bytes()[1:], &out); err != nil { + t.Fatalf("unexpected err: %s", err) + } + if msg.SeqNo != out.SeqNo { + t.Fatalf("bad sequence no") + } +} + +func TestRandomOffset(t *testing.T) { + vals := make(map[int]struct{}) + for i := 0; i < 100; i++ { + offset := randomOffset(2 << 30) + if _, ok := vals[offset]; ok { + t.Fatalf("got collision") + } + vals[offset] = struct{}{} + } +} + +func TestRandomOffset_Zero(t *testing.T) { + offset := randomOffset(0) + if offset != 0 { + t.Fatalf("bad offset") + } +} + +func TestSuspicionTimeout(t *testing.T) { + timeouts := map[int]time.Duration{ + 5: 1000 * time.Millisecond, + 10: 1000 * time.Millisecond, + 50: 1698 * time.Millisecond, + 100: 2000 * time.Millisecond, + 500: 2698 * time.Millisecond, + 1000: 3000 * time.Millisecond, + } + for n, expected := range timeouts { + timeout := suspicionTimeout(3, n, time.Second) / 3 + if timeout != expected { + t.Fatalf("bad: %v, %v", expected, timeout) + } + } +} + +func TestRetransmitLimit(t *testing.T) { + lim := retransmitLimit(3, 0) + if lim != 0 { + t.Fatalf("bad val %v", lim) + } + lim = retransmitLimit(3, 1) + if lim != 3 { + t.Fatalf("bad val %v", lim) + } + lim = retransmitLimit(3, 99) + if lim != 6 { + t.Fatalf("bad val %v", lim) + } +} + +func TestShuffleNodes(t *testing.T) { + orig := []*nodeState{ + &nodeState{ + State: stateDead, + }, + &nodeState{ + State: stateAlive, + }, + &nodeState{ + State: stateAlive, + }, + &nodeState{ + State: stateDead, + }, + &nodeState{ + State: stateAlive, + }, + &nodeState{ + State: stateAlive, + }, + &nodeState{ + State: stateDead, + }, + &nodeState{ + State: stateAlive, + }, + } + nodes := make([]*nodeState, len(orig)) + copy(nodes[:], orig[:]) + + if !reflect.DeepEqual(nodes, orig) { + t.Fatalf("should match") + } + + shuffleNodes(nodes) + + if reflect.DeepEqual(nodes, orig) { + t.Fatalf("should not match") + } +} + +func TestPushPullScale(t *testing.T) { + sec := time.Second + for i := 0; i <= 32; i++ { + if s := pushPullScale(sec, i); s != sec { + t.Fatalf("Bad time scale: %v", s) + } + } + for i := 33; i <= 64; 
i++ { + if s := pushPullScale(sec, i); s != 2*sec { + t.Fatalf("Bad time scale: %v", s) + } + } + for i := 65; i <= 128; i++ { + if s := pushPullScale(sec, i); s != 3*sec { + t.Fatalf("Bad time scale: %v", s) + } + } +} + +func TestMoveDeadNodes(t *testing.T) { + nodes := []*nodeState{ + &nodeState{ + State: stateDead, + StateChange: time.Now().Add(-20 * time.Second), + }, + &nodeState{ + State: stateAlive, + StateChange: time.Now().Add(-20 * time.Second), + }, + // This dead node should not be moved, as its state changed + // less than the specified GossipToTheDead time ago + &nodeState{ + State: stateDead, + StateChange: time.Now().Add(-10 * time.Second), + }, + &nodeState{ + State: stateAlive, + StateChange: time.Now().Add(-20 * time.Second), + }, + &nodeState{ + State: stateDead, + StateChange: time.Now().Add(-20 * time.Second), + }, + &nodeState{ + State: stateAlive, + StateChange: time.Now().Add(-20 * time.Second), + }, + } + + idx := moveDeadNodes(nodes, (15 * time.Second)) + if idx != 4 { + t.Fatalf("bad index") + } + for i := 0; i < idx; i++ { + switch i { + case 2: + // Recently dead node remains at index 2, + // since nodes are swapped out to move to end. + if nodes[i].State != stateDead { + t.Fatalf("Bad state %d", i) + } + default: + if nodes[i].State != stateAlive { + t.Fatalf("Bad state %d", i) + } + } + } + for i := idx; i < len(nodes); i++ { + if nodes[i].State != stateDead { + t.Fatalf("Bad state %d", i) + } + } +} + +func TestKRandomNodes(t *testing.T) { + nodes := []*nodeState{} + for i := 0; i < 90; i++ { + // Half the nodes are in a bad state + state := stateAlive + switch i % 3 { + case 0: + state = stateAlive + case 1: + state = stateSuspect + case 2: + state = stateDead + } + nodes = append(nodes, &nodeState{ + Node: Node{ + Name: fmt.Sprintf("test%d", i), + }, + State: state, + }) + } + + filterFunc := func(n *nodeState) bool { + if n.Name == "test0" || n.State != stateAlive { + return true + } + return false + } + + s1 := kRandomNodes(3, nodes, filterFunc) + s2 := kRandomNodes(3, nodes, filterFunc) + s3 := kRandomNodes(3, nodes, filterFunc) + + if reflect.DeepEqual(s1, s2) { + t.Fatalf("unexpected equal") + } + if reflect.DeepEqual(s1, s3) { + t.Fatalf("unexpected equal") + } + if reflect.DeepEqual(s2, s3) { + t.Fatalf("unexpected equal") + } + + for _, s := range [][]*nodeState{s1, s2, s3} { + if len(s) != 3 { + t.Fatalf("bad len") + } + for _, n := range s { + if n.Name == "test0" { + t.Fatalf("Bad name") + } + if n.State != stateAlive { + t.Fatalf("Bad state") + } + } + } +} + +func TestMakeCompoundMessage(t *testing.T) { + msg := &ping{SeqNo: 100} + buf, err := encode(pingMsg, msg) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + msgs := [][]byte{buf.Bytes(), buf.Bytes(), buf.Bytes()} + compound := makeCompoundMessage(msgs) + + if compound.Len() != 3*buf.Len()+3*compoundOverhead+compoundHeaderOverhead { + t.Fatalf("bad len") + } +} + +func TestDecodeCompoundMessage(t *testing.T) { + msg := &ping{SeqNo: 100} + buf, err := encode(pingMsg, msg) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + msgs := [][]byte{buf.Bytes(), buf.Bytes(), buf.Bytes()} + compound := makeCompoundMessage(msgs) + + trunc, parts, err := decodeCompoundMessage(compound.Bytes()[1:]) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + if trunc != 0 { + t.Fatalf("should not truncate") + } + if len(parts) != 3 { + t.Fatalf("bad parts") + } + for _, p := range parts { + if len(p) != buf.Len() { + t.Fatalf("bad part len") + } + } +} + +func 
TestDecodeCompoundMessage_Trunc(t *testing.T) { + msg := &ping{SeqNo: 100} + buf, err := encode(pingMsg, msg) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + msgs := [][]byte{buf.Bytes(), buf.Bytes(), buf.Bytes()} + compound := makeCompoundMessage(msgs) + + trunc, parts, err := decodeCompoundMessage(compound.Bytes()[1:38]) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + if trunc != 1 { + t.Fatalf("truncate: %d", trunc) + } + if len(parts) != 2 { + t.Fatalf("bad parts") + } + for _, p := range parts { + if len(p) != buf.Len() { + t.Fatalf("bad part len") + } + } +} + +func TestCompressDecompressPayload(t *testing.T) { + buf, err := compressPayload([]byte("testing")) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + decomp, err := decompressPayload(buf.Bytes()[1:]) + if err != nil { + t.Fatalf("unexpected err: %s", err) + } + + if !reflect.DeepEqual(decomp, []byte("testing")) { + t.Fatalf("bad payload: %v", decomp) + } +} diff --git a/vendor/github.com/jpillora/backoff/backoff_test.go b/vendor/github.com/jpillora/backoff/backoff_test.go new file mode 100644 index 0000000000..cb36833935 --- /dev/null +++ b/vendor/github.com/jpillora/backoff/backoff_test.go @@ -0,0 +1,126 @@ +package backoff + +import ( + "reflect" + "testing" + "time" +) + +func Test1(t *testing.T) { + + b := &Backoff{ + Min: 100 * time.Millisecond, + Max: 10 * time.Second, + Factor: 2, + } + + equals(t, b.Duration(), 100*time.Millisecond) + equals(t, b.Duration(), 200*time.Millisecond) + equals(t, b.Duration(), 400*time.Millisecond) + b.Reset() + equals(t, b.Duration(), 100*time.Millisecond) +} + +func TestForAttempt(t *testing.T) { + + b := &Backoff{ + Min: 100 * time.Millisecond, + Max: 10 * time.Second, + Factor: 2, + } + + equals(t, b.ForAttempt(0), 100*time.Millisecond) + equals(t, b.ForAttempt(1), 200*time.Millisecond) + equals(t, b.ForAttempt(2), 400*time.Millisecond) + b.Reset() + equals(t, b.ForAttempt(0), 100*time.Millisecond) +} + +func Test2(t *testing.T) { + + b := &Backoff{ + Min: 100 * time.Millisecond, + Max: 10 * time.Second, + Factor: 1.5, + } + + equals(t, b.Duration(), 100*time.Millisecond) + equals(t, b.Duration(), 150*time.Millisecond) + equals(t, b.Duration(), 225*time.Millisecond) + b.Reset() + equals(t, b.Duration(), 100*time.Millisecond) +} + +func Test3(t *testing.T) { + + b := &Backoff{ + Min: 100 * time.Nanosecond, + Max: 10 * time.Second, + Factor: 1.75, + } + + equals(t, b.Duration(), 100*time.Nanosecond) + equals(t, b.Duration(), 175*time.Nanosecond) + equals(t, b.Duration(), 306*time.Nanosecond) + b.Reset() + equals(t, b.Duration(), 100*time.Nanosecond) +} + +func Test4(t *testing.T) { + b := &Backoff{ + Min: 500 * time.Second, + Max: 100 * time.Second, + Factor: 1, + } + + equals(t, b.Duration(), b.Max) +} + +func TestGetAttempt(t *testing.T) { + b := &Backoff{ + Min: 100 * time.Millisecond, + Max: 10 * time.Second, + Factor: 2, + } + equals(t, b.Attempt(), float64(0)) + equals(t, b.Duration(), 100*time.Millisecond) + equals(t, b.Attempt(), float64(1)) + equals(t, b.Duration(), 200*time.Millisecond) + equals(t, b.Attempt(), float64(2)) + equals(t, b.Duration(), 400*time.Millisecond) + equals(t, b.Attempt(), float64(3)) + b.Reset() + equals(t, b.Attempt(), float64(0)) + equals(t, b.Duration(), 100*time.Millisecond) + equals(t, b.Attempt(), float64(1)) +} + +func TestJitter(t *testing.T) { + b := &Backoff{ + Min: 100 * time.Millisecond, + Max: 10 * time.Second, + Factor: 2, + Jitter: true, + } + + equals(t, b.Duration(), 100*time.Millisecond) + 
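// With Jitter enabled, every duration after the first is randomized between
+	// Min and the exponential target, so the checks below can only assert a
+	// range rather than an exact value. (Explanatory comment added here; it is
+	// not part of the upstream test.) +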
between(t, b.Duration(), 100*time.Millisecond, 200*time.Millisecond) + between(t, b.Duration(), 100*time.Millisecond, 400*time.Millisecond) + b.Reset() + equals(t, b.Duration(), 100*time.Millisecond) +} + +func between(t *testing.T, actual, low, high time.Duration) { + if actual < low { + t.Fatalf("Got %s, Expecting >= %s", actual, low) + } + if actual > high { + t.Fatalf("Got %s, Expecting <= %s", actual, high) + } +} + +func equals(t *testing.T, v1, v2 interface{}) { + if !reflect.DeepEqual(v1, v2) { + t.Fatalf("Got %v, Expecting %v", v1, v2) + } +} diff --git a/vendor/github.com/jtolds/gls/context_test.go b/vendor/github.com/jtolds/gls/context_test.go new file mode 100644 index 0000000000..ae5bde4aed --- /dev/null +++ b/vendor/github.com/jtolds/gls/context_test.go @@ -0,0 +1,139 @@ +package gls + +import ( + "fmt" + "sync" + "testing" +) + +func TestContexts(t *testing.T) { + mgr1 := NewContextManager() + mgr2 := NewContextManager() + + CheckVal := func(mgr *ContextManager, key, exp_val string) { + val, ok := mgr.GetValue(key) + if len(exp_val) == 0 { + if ok { + t.Fatalf("expected no value for key %s, got %s", key, val) + } + return + } + if !ok { + t.Fatalf("expected value %s for key %s, got no value", + exp_val, key) + } + if exp_val != val { + t.Fatalf("expected value %s for key %s, got %s", exp_val, key, + val) + } + + } + + Check := func(exp_m1v1, exp_m1v2, exp_m2v1, exp_m2v2 string) { + CheckVal(mgr1, "key1", exp_m1v1) + CheckVal(mgr1, "key2", exp_m1v2) + CheckVal(mgr2, "key1", exp_m2v1) + CheckVal(mgr2, "key2", exp_m2v2) + } + + Check("", "", "", "") + mgr2.SetValues(Values{"key1": "val1c"}, func() { + Check("", "", "val1c", "") + mgr1.SetValues(Values{"key1": "val1a"}, func() { + Check("val1a", "", "val1c", "") + mgr1.SetValues(Values{"key2": "val1b"}, func() { + Check("val1a", "val1b", "val1c", "") + var wg sync.WaitGroup + wg.Add(2) + go func() { + defer wg.Done() + Check("", "", "", "") + }() + Go(func() { + defer wg.Done() + Check("val1a", "val1b", "val1c", "") + }) + wg.Wait() + }) + }) + }) +} + +func ExampleContextManager_SetValues() { + var ( + mgr = NewContextManager() + request_id_key = GenSym() + ) + + MyLog := func() { + if request_id, ok := mgr.GetValue(request_id_key); ok { + fmt.Println("My request id is:", request_id) + } else { + fmt.Println("No request id found") + } + } + + mgr.SetValues(Values{request_id_key: "12345"}, func() { + MyLog() + }) + MyLog() + + // Output: My request id is: 12345 + // No request id found +} + +func ExampleGo() { + var ( + mgr = NewContextManager() + request_id_key = GenSym() + ) + + MyLog := func() { + if request_id, ok := mgr.GetValue(request_id_key); ok { + fmt.Println("My request id is:", request_id) + } else { + fmt.Println("No request id found") + } + } + + mgr.SetValues(Values{request_id_key: "12345"}, func() { + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + MyLog() + }() + wg.Wait() + wg.Add(1) + Go(func() { + defer wg.Done() + MyLog() + }) + wg.Wait() + }) + + // Output: No request id found + // My request id is: 12345 +} + +func BenchmarkGetValue(b *testing.B) { + mgr := NewContextManager() + mgr.SetValues(Values{"test_key": "test_val"}, func() { + b.ResetTimer() + for i := 0; i < b.N; i++ { + val, ok := mgr.GetValue("test_key") + if !ok || val != "test_val" { + b.FailNow() + } + } + }) +} + +func BenchmarkSetValues(b *testing.B) { + mgr := NewContextManager() + for i := 0; i < b.N/2; i++ { + mgr.SetValues(Values{"test_key": "test_val"}, func() { + mgr.SetValues(Values{"test_key2": "test_val2"}, 
func() {}) + }) + } +} diff --git a/vendor/github.com/kisielk/og-rek/.gitignore b/vendor/github.com/kisielk/og-rek/.gitignore new file mode 100644 index 0000000000..29585fe79b --- /dev/null +++ b/vendor/github.com/kisielk/og-rek/.gitignore @@ -0,0 +1 @@ +*-fuzz.zip diff --git a/vendor/github.com/kisielk/og-rek/.travis.yml b/vendor/github.com/kisielk/og-rek/.travis.yml new file mode 100644 index 0000000000..b083cdc790 --- /dev/null +++ b/vendor/github.com/kisielk/og-rek/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip diff --git a/vendor/github.com/kisielk/og-rek/encode_test.go b/vendor/github.com/kisielk/og-rek/encode_test.go new file mode 100644 index 0000000000..a2b8689c5c --- /dev/null +++ b/vendor/github.com/kisielk/og-rek/encode_test.go @@ -0,0 +1,90 @@ +package ogórek + +import ( + "bytes" + "io" + "reflect" + "testing" +) + +func TestEncode(t *testing.T) { + + type foo struct { + Foo string + Bar int32 + } + + tests := []struct { + name string + input interface{} + output interface{} + }{ + { + "graphite message", + []interface{}{map[interface{}]interface{}{"values": []interface{}{float64(473), float64(497), float64(540), float64(1497), float64(1808), float64(1890), float64(2013), float64(1821), float64(1847), float64(2176), float64(2156), float64(1250), float64(2055), float64(1570), None{}, None{}}, "start": int64(1383782400), "step": int64(86400), "end": int64(1385164800), "name": "ZZZZ.UUUUUUUU.CCCCCCCC.MMMMMMMM.XXXXXXXXX.TTT"}}, + nil, + }, + { + "small types", + []interface{}{int64(0), int64(1), int64(258), int64(65537), false, true}, + nil, + }, + { + "array of struct types", + []foo{{"Qux", 4}}, + []interface{}{map[interface{}]interface{}{"Foo": "Qux", "Bar": int64(4)}}, + }, + } + + for _, tt := range tests { + p := &bytes.Buffer{} + e := NewEncoder(p) + err := e.Encode(tt.input) + if err != nil { + t.Errorf("%s: encode error: %v", tt.name, err) + } + + d := NewDecoder(bytes.NewReader(p.Bytes())) + output, _ := d.Decode() + + want := tt.output + if want == nil { + want = tt.input + } + + if !reflect.DeepEqual(want, output) { + t.Errorf("%s: got\n%q\n expected\n%q", tt.name, output, want) + } + + for l := int64(p.Len())-1; l >= 0; l-- { + p.Reset() + e := NewEncoder(LimitWriter(p, l)) + err = e.Encode(tt.input) + if err != io.EOF { + t.Errorf("%s: encoder did not handle write error @%v: got %#v", tt.name, l, err) + } + } + + } +} + +// like io.LimitedReader but for writes +// XXX it would be good to have it in stdlib +type LimitedWriter struct { + W io.Writer + N int64 +} + +func (l *LimitedWriter) Write(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.N { + p = p[0:l.N] + } + n, err = l.W.Write(p) + l.N -= int64(n) + return +} + +func LimitWriter(w io.Writer, n int64) io.Writer { return &LimitedWriter{w, n} } diff --git a/vendor/github.com/kisielk/og-rek/ogorek_test.go b/vendor/github.com/kisielk/og-rek/ogorek_test.go new file mode 100644 index 0000000000..4201016bbd --- /dev/null +++ b/vendor/github.com/kisielk/og-rek/ogorek_test.go @@ -0,0 +1,376 @@ +package ogórek + +import ( + "bytes" + "encoding/hex" + "io" + "math/big" + "reflect" + "strconv" + "strings" + "testing" +) + +func bigInt(s string) *big.Int { + i := new(big.Int) + _, ok := i.SetString(s, 10) + if !ok { + panic("bigInt") + } + return i +} + +func TestMarker(t *testing.T) { + buf := bytes.Buffer{} + dec := NewDecoder(&buf) + dec.mark() + k, err := dec.marker() + if err != nil { + t.Error(err) + } + if k != 0 { + t.Error("no marker 
found") + } +} + +var graphitePickle1, _ = hex.DecodeString("80025d71017d710228550676616c75657371035d71042847407d90000000000047407f100000000000474080e0000000000047409764000000000047409c40000000000047409d88000000000047409f74000000000047409c74000000000047409cdc00000000004740a10000000000004740a0d800000000004740938800000000004740a00e00000000004740988800000000004e4e655505737461727471054a00d87a5255047374657071064a805101005503656e6471074a00f08f5255046e616d657108552d5a5a5a5a2e55555555555555552e43434343434343432e4d4d4d4d4d4d4d4d2e5858585858585858582e545454710975612e") +var graphitePickle2, _ = hex.DecodeString("286c70300a286470310a53277374617274270a70320a49313338333738323430300a73532773746570270a70330a4938363430300a735327656e64270a70340a49313338353136343830300a73532776616c756573270a70350a286c70360a463437332e300a61463439372e300a61463534302e300a6146313439372e300a6146313830382e300a6146313839302e300a6146323031332e300a6146313832312e300a6146313834372e300a6146323137362e300a6146323135362e300a6146313235302e300a6146323035352e300a6146313537302e300a614e614e617353276e616d65270a70370a5327757365722e6c6f67696e2e617265612e6d616368696e652e6d65747269632e6d696e757465270a70380a73612e") +var graphitePickle3, _ = hex.DecodeString("286c70310a286470320a5327696e74657276616c73270a70330a286c70340a7353276d65747269635f70617468270a70350a5327636172626f6e2e6167656e7473270a70360a73532769734c656166270a70370a4930300a7361286470380a67330a286c70390a7367350a5327636172626f6e2e61676772656761746f72270a7031300a7367370a4930300a736128647031310a67330a286c7031320a7367350a5327636172626f6e2e72656c617973270a7031330a7367370a4930300a73612e") + +var tests = []struct { + name string + input string + expected interface{} +}{ + {"int", "I5\n.", int64(5)}, + {"float", "F1.23\n.", float64(1.23)}, + {"long", "L12321231232131231231L\n.", bigInt("12321231232131231231")}, + {"None", "N.", None{}}, + {"empty tuple", "(t.", Tuple{}}, + {"tuple of two ints", "(I1\nI2\ntp0\n.", Tuple{int64(1), int64(2)}}, + {"nested tuples", "((I1\nI2\ntp0\n(I3\nI4\ntp1\ntp2\n.", + Tuple{Tuple{int64(1), int64(2)}, Tuple{int64(3), int64(4)}}}, + {"tuple with top 1 items from stack", "I0\n\x85.", Tuple{int64(0)}}, + {"tuple with top 2 items from stack", "I0\nI1\n\x86.", Tuple{int64(0), int64(1)}}, + {"tuple with top 3 items from stack", "I0\nI1\nI2\n\x87.", Tuple{int64(0), int64(1), int64(2)}}, + {"empty list", "(lp0\n.", []interface{}{}}, + {"list of numbers", "(lp0\nI1\naI2\naI3\naI4\na.", []interface{}{int64(1), int64(2), int64(3), int64(4)}}, + {"string", "S'abc'\np0\n.", string("abc")}, + {"unicode", "V\\u65e5\\u672c\\u8a9e\np0\n.", string("日本語")}, + {"unicode2", "V' \\u77e5\\u4e8b\\u5c11\\u65f6\\u70e6\\u607c\\u5c11\\u3001\\u8bc6\\u4eba\\u591a\\u5904\\u662f\\u975e\\u591a\\u3002\n.", string("' 知事少时烦恼少、识人多处是非多。")}, + {"empty dict", "(dp0\n.", make(map[interface{}]interface{})}, + {"dict with strings", "(dp0\nS'a'\np1\nS'1'\np2\nsS'b'\np3\nS'2'\np4\ns.", map[interface{}]interface{}{"a": "1", "b": "2"}}, + {"GLOBAL and REDUCE opcodes", "cfoo\nbar\nS'bing'\n\x85R.", Call{Callable: Class{Module: "foo", Name: "bar"}, Args: []interface{}{"bing"}}}, + {"LONG_BINPUT opcode", "(lr0000I17\na.", []interface{}{int64(17)}}, + {"graphite message1", string(graphitePickle1), []interface{}{map[interface{}]interface{}{"values": []interface{}{float64(473), float64(497), float64(540), float64(1497), float64(1808), float64(1890), float64(2013), float64(1821), float64(1847), float64(2176), float64(2156), float64(1250), float64(2055), float64(1570), None{}, None{}}, "start": int64(1383782400), "step": 
int64(86400), "end": int64(1385164800), "name": "ZZZZ.UUUUUUUU.CCCCCCCC.MMMMMMMM.XXXXXXXXX.TTT"}}}, + {"graphite message2", string(graphitePickle2), []interface{}{map[interface{}]interface{}{"values": []interface{}{float64(473), float64(497), float64(540), float64(1497), float64(1808), float64(1890), float64(2013), float64(1821), float64(1847), float64(2176), float64(2156), float64(1250), float64(2055), float64(1570), None{}, None{}}, "start": int64(1383782400), "step": int64(86400), "end": int64(1385164800), "name": "user.login.area.machine.metric.minute"}}}, + {"graphite message3", string(graphitePickle3), []interface{}{map[interface{}]interface{}{"intervals": []interface{}{}, "metric_path": "carbon.agents", "isLeaf": false}, map[interface{}]interface{}{"intervals": []interface{}{}, "metric_path": "carbon.aggregator", "isLeaf": false}, map[interface{}]interface{}{"intervals": []interface{}{}, "metric_path": "carbon.relays", "isLeaf": false}}}, + {"too long line", "V28,34,30,55,100,130,87,169,194,202,232,252,267,274,286,315,308,221,358,368,401,406,434,452,475,422,497,530,517,559,400,418,571,578,599,600,625,630,635,647,220,715,736,760,705,785,794,495,808,852,861,863,869,875,890,893,896,922,812,980,1074,1087,1145,1153,1163,1171,445,1195,1203,1242,1255,1274,52,1287,1319,636,1160,1339,1345,1353,1369,1391,1396,1405,1221,1410,1431,1451,1460,1470,1472,1492,1517,1528,419,1530,1532,1535,1573,1547,1574,1437,1594,1595,847,1551,983,1637,1647,1666,1672,1691,1726,1515,1731,1739,1741,1723,1776,1685,505,1624,1436,1890,728,1910,1931,1544,2013,2025,2030,2043,2069,1162,2129,2160,2199,2210,1911,2246,804,2276,1673,2299,2315,2322,2328,2355,2376,2405,1159,2425,2430,2452,1804,2442,2567,2577,1167,2611,2534,1879,2623,2682,2699,2652,2742,2754,2774,2782,2795,2431,2821,2751,2850,2090,513,2898,592,2932,2933,1555,2969,3003,3007,3010,2595,3064,3087,3105,3106,3110,151,3129,3132,304,3173,3205,3233,3245,3279,3302,3307,714,316,3331,3347,3360,3375,3380,3442,2620,3482,3493,3504,3516,3517,3518,3533,3511,2681,3530,3601,3606,3615,1210,3633,3651,3688,3690,3781,1907,3839,3840,3847,3867,3816,3899,3924,2345,3912,3966,982,4040,4056,4076,4084,4105,2649,4171,3873,1415,3567,4188,4221,4227,4231,2279,4250,4253,770,894,4343,4356,4289,4404,4438,2572,3124,4334,2114,3953,4522,4537,4561,4571,641,4629,4640,4664,4687,4702,4709,4740,4605,4746,4768,3856,3980,4814,2984,4895,4908,1249,4944,4947,4979,4988,4995,32,4066,5043,4956,5069,5072,5076,5084,5085,5137,4262,5152,479,5156,3114,1277,5183,5186,1825,5106,5216,963,5239,5252,5218,5284,1980,1972,5352,5364,5294,5379,5387,5391,5397,5419,5434,5468,5471,3350,5510,5522,5525,5538,5554,5573,5597,5610,5615,5624,842,2851,5641,5655,5656,5658,5678,5682,5696,5699,5709,5728,5753,851,5805,3528,5822,801,5855,2929,5871,5899,5918,5925,5927,5931,5935,5939,5958,778,5971,5980,5300,6009,6023,6030,6032,6016,6110,5009,6155,6197,1760,6253,6267,4886,5608,6289,6308,6311,6321,6316,6333,6244,6070,6349,6353,6186,6357,6366,6386,6387,6389,6399,6411,6421,6432,6437,6465,6302,6493,5602,6511,6529,6536,6170,6557,6561,6577,6581,6590,5290,5649,6231,6275,6635,6651,6652,5929,6692,6693,6695,6705,6711,6723,6738,6752,6753,3629,2975,6790,5845,338,6814,6826,6478,6860,6872,6882,880,356,6897,4102,6910,6611,1030,6934,6936,6987,6984,6999,827,6902,7027,7049,7051,4628,7084,7083,7071,7102,7137,5867,7152,6048,2410,3896,7168,7177,7224,6606,7233,1793,7261,7284,7290,7292,5212,7315,6964,3238,355,1969,4256,448,7325,908,2824,2981,3193,3363,3613,5325,6388,2247,1348,72,131,5414,7285,7343,7349,7362,7372,7381,7410,7418,7443,5512,7470,7487,7497,7516,7277,2622,2863,9
45,4344,3774,1024,2272,7523,4476,256,5643,3164,7539,7540,7489,1932,7559,7575,7602,7605,7609,7608,7619,7204,7652,7663,6907,7672,7654,7674,7687,7718,7745,1202,4030,7797,7801,7799,2924,7871,7873,7900,7907,7911,7912,7917,7923,7935,8007,8017,7636,8084,8087,3686,8114,8153,8158,8171,8175,8182,8205,8222,8225,8229,8232,8234,8244,8247,7256,8279,6929,8285,7040,8328,707,6773,7949,8468,5759,6344,8509,1635\n.", "28,34,30,55,100,130,87,169,194,202,232,252,267,274,286,315,308,221,358,368,401,406,434,452,475,422,497,530,517,559,400,418,571,578,599,600,625,630,635,647,220,715,736,760,705,785,794,495,808,852,861,863,869,875,890,893,896,922,812,980,1074,1087,1145,1153,1163,1171,445,1195,1203,1242,1255,1274,52,1287,1319,636,1160,1339,1345,1353,1369,1391,1396,1405,1221,1410,1431,1451,1460,1470,1472,1492,1517,1528,419,1530,1532,1535,1573,1547,1574,1437,1594,1595,847,1551,983,1637,1647,1666,1672,1691,1726,1515,1731,1739,1741,1723,1776,1685,505,1624,1436,1890,728,1910,1931,1544,2013,2025,2030,2043,2069,1162,2129,2160,2199,2210,1911,2246,804,2276,1673,2299,2315,2322,2328,2355,2376,2405,1159,2425,2430,2452,1804,2442,2567,2577,1167,2611,2534,1879,2623,2682,2699,2652,2742,2754,2774,2782,2795,2431,2821,2751,2850,2090,513,2898,592,2932,2933,1555,2969,3003,3007,3010,2595,3064,3087,3105,3106,3110,151,3129,3132,304,3173,3205,3233,3245,3279,3302,3307,714,316,3331,3347,3360,3375,3380,3442,2620,3482,3493,3504,3516,3517,3518,3533,3511,2681,3530,3601,3606,3615,1210,3633,3651,3688,3690,3781,1907,3839,3840,3847,3867,3816,3899,3924,2345,3912,3966,982,4040,4056,4076,4084,4105,2649,4171,3873,1415,3567,4188,4221,4227,4231,2279,4250,4253,770,894,4343,4356,4289,4404,4438,2572,3124,4334,2114,3953,4522,4537,4561,4571,641,4629,4640,4664,4687,4702,4709,4740,4605,4746,4768,3856,3980,4814,2984,4895,4908,1249,4944,4947,4979,4988,4995,32,4066,5043,4956,5069,5072,5076,5084,5085,5137,4262,5152,479,5156,3114,1277,5183,5186,1825,5106,5216,963,5239,5252,5218,5284,1980,1972,5352,5364,5294,5379,5387,5391,5397,5419,5434,5468,5471,3350,5510,5522,5525,5538,5554,5573,5597,5610,5615,5624,842,2851,5641,5655,5656,5658,5678,5682,5696,5699,5709,5728,5753,851,5805,3528,5822,801,5855,2929,5871,5899,5918,5925,5927,5931,5935,5939,5958,778,5971,5980,5300,6009,6023,6030,6032,6016,6110,5009,6155,6197,1760,6253,6267,4886,5608,6289,6308,6311,6321,6316,6333,6244,6070,6349,6353,6186,6357,6366,6386,6387,6389,6399,6411,6421,6432,6437,6465,6302,6493,5602,6511,6529,6536,6170,6557,6561,6577,6581,6590,5290,5649,6231,6275,6635,6651,6652,5929,6692,6693,6695,6705,6711,6723,6738,6752,6753,3629,2975,6790,5845,338,6814,6826,6478,6860,6872,6882,880,356,6897,4102,6910,6611,1030,6934,6936,6987,6984,6999,827,6902,7027,7049,7051,4628,7084,7083,7071,7102,7137,5867,7152,6048,2410,3896,7168,7177,7224,6606,7233,1793,7261,7284,7290,7292,5212,7315,6964,3238,355,1969,4256,448,7325,908,2824,2981,3193,3363,3613,5325,6388,2247,1348,72,131,5414,7285,7343,7349,7362,7372,7381,7410,7418,7443,5512,7470,7487,7497,7516,7277,2622,2863,945,4344,3774,1024,2272,7523,4476,256,5643,3164,7539,7540,7489,1932,7559,7575,7602,7605,7609,7608,7619,7204,7652,7663,6907,7672,7654,7674,7687,7718,7745,1202,4030,7797,7801,7799,2924,7871,7873,7900,7907,7911,7912,7917,7923,7935,8007,8017,7636,8084,8087,3686,8114,8153,8158,8171,8175,8182,8205,8222,8225,8229,8232,8234,8244,8247,7256,8279,6929,8285,7040,8328,707,6773,7949,8468,5759,6344,8509,1635"}, + {"FRAME Opcode and int", "\x95\x00\x00\x00\x00\x00\x00\x00\x00I5\n.", int64(5)}, + {"SHORTBINUNICODE opcode", "\x8c\t\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e\x94.", "日本語"}, +} + 
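+// TestRoundTripSketch is an illustrative sketch added for orientation (it is
+// not part of the upstream og-rek test suite): it shows how the Encoder and
+// Decoder exercised by the table above are typically driven end to end.
+func TestRoundTripSketch(t *testing.T) {
+	var buf bytes.Buffer
+	// Encode a Go value into a pickle stream. Tuple and None are the
+	// package's stand-ins for the corresponding Python types.
+	in := Tuple{int64(1), "two", None{}}
+	if err := NewEncoder(&buf).Encode(in); err != nil {
+		t.Fatal(err)
+	}
+	// Decode reads exactly one pickle, up to its STOP ('.') opcode.
+	out, err := NewDecoder(&buf).Decode()
+	if err != nil {
+		t.Fatal(err)
+	}
+	if !reflect.DeepEqual(out, in) {
+		t.Fatalf("round trip mismatch: got %#v, want %#v", out, in)
+	}
+}
+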
+func TestDecode(t *testing.T) { + for _, test := range tests { + // decode(input) -> expected + buf := bytes.NewBufferString(test.input) + dec := NewDecoder(buf) + v, err := dec.Decode() + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(v, test.expected) { + t.Errorf("%s: decode:\nhave: %#v\nwant: %#v", test.name, v, test.expected) + } + + // decode more -> EOF + v, err = dec.Decode() + if !(v == nil && err == io.EOF) { + t.Errorf("%s: decode: no EOF at end: v = %#v err = %#v", test.name, v, err) + } + + // expected (= decoded(input)) -> encode -> decode = identity + buf.Reset() + enc := NewEncoder(buf) + err = enc.Encode(test.expected) + if err != nil { + t.Errorf("%s: encode(expected): %v", test.name, err) + } else { + dec := NewDecoder(buf) + v, err := dec.Decode() + if err != nil { + t.Error(err) + } + + if !reflect.DeepEqual(v, test.expected) { + t.Errorf("%s: expected -> decode -> encode != identity\nhave: %#v\nwant: %#v", test.name, v, test.expected) + } + } + + // for truncated input io.ErrUnexpectedEOF must be returned + for l := len(test.input) - 1; l > 0; l-- { + buf := bytes.NewBufferString(test.input[:l]) + dec := NewDecoder(buf) + //println(test.name, l) + v, err := dec.Decode() + // strconv.UnquoteChar used in loadUnicode always returns + // SyntaxError, at least unless the following CL is accepted: + // https://go-review.googlesource.com/37052 + if err == strconv.ErrSyntax && strings.HasPrefix(test.name, "unicode") { + err = io.ErrUnexpectedEOF + } + if !(v == nil && err == io.ErrUnexpectedEOF) { + t.Errorf("%s: no ErrUnexpectedEOF on [:%d] truncated stream: v = %#v err = %#v", test.name, l, v, err) + } + } + + // by using input with omitted prefix we can test how code handles pickle stack overflow: + // it must not panic + for i := 0; i < len(test.input); i++ { + buf := bytes.NewBufferString(test.input[i:]) + dec := NewDecoder(buf) + func() { + defer func() { + if r := recover(); r != nil { + t.Errorf("%s: panic on input[%d:]: %v", test.name, i, r) + } + }() + dec.Decode() + }() + } + } +} + +// test that .Decode() decodes only until stop opcode, and can continue +// decoding further on next call +func TestDecodeMultiple(t *testing.T) { + input := "I5\n.I7\n.N." 
+ expected := []interface{}{int64(5), int64(7), None{}} + + buf := bytes.NewBufferString(input) + dec := NewDecoder(buf) + + for i, objOk := range expected { + obj, err := dec.Decode() + if err != nil { + t.Errorf("step #%v: %v", i, err) + } + + if !reflect.DeepEqual(obj, objOk) { + t.Errorf("step #%v: %q ; want %q", i, obj, objOk) + } + } + + obj, err := dec.Decode() + if !(obj == nil && err == io.EOF) { + t.Errorf("decode: no EOF at end: obj = %#v err = %#v", obj, err) + } +} + +func TestZeroLengthData(t *testing.T) { + data := "" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + if output.BitLen() > 0 { + t.Fail() + } +} + +func TestValue1(t *testing.T) { + data := "\xff\x00" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + target := big.NewInt(255) + if target.Cmp(output) != 0 { + t.Fail() + } +} + +func TestValue2(t *testing.T) { + data := "\xff\x7f" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + target := big.NewInt(32767) + if target.Cmp(output) != 0 { + t.Fail() + } +} + +func TestValue3(t *testing.T) { + data := "\x00\xff" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + target := big.NewInt(256) + target.Neg(target) + if target.Cmp(output) != 0 { + t.Logf("\nGot %v\nExpecting %v\n", output, target) + t.Fail() + } +} + +func TestValue4(t *testing.T) { + data := "\x00\x80" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + target := big.NewInt(32768) + target.Neg(target) + if target.Cmp(output) != 0 { + t.Logf("\nGot %v\nExpecting %v\n", output, target) + t.Fail() + } +} + +func TestValue5(t *testing.T) { + data := "\x80" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + target := big.NewInt(128) + target.Neg(target) + if target.Cmp(output) != 0 { + t.Logf("\nGot %v\nExpecting %v\n", output, target) + t.Fail() + } +} + +func TestValue6(t *testing.T) { + data := "\x7f" + output, err := decodeLong(data) + if err != nil { + t.Errorf("Error from decodeLong - %v\n", err) + } + target := big.NewInt(127) + if target.Cmp(output) != 0 { + t.Fail() + } +} + +func BenchmarkSpeed(b *testing.B) { + for i := 0; i < b.N; i++ { + data := "\x00\x80" + _, err := decodeLong(data) + if err != nil { + b.Errorf("Error from decodeLong - %v\n", err) + } + } +} + +func TestMemoOpCode(t *testing.T) { + buf := bytes.NewBufferString("I5\n\x94.") + dec := NewDecoder(buf) + _, err := dec.Decode() + if err != nil { + t.Errorf("Error from TestMemoOpCode - %v\n", err) + } + if dec.memo["0"] != int64(5) { + t.Errorf("Error from TestMemoOpCode - Top stack value was not added to memo") + } + +} + +// verify that decode of erroneous input produces error +func TestDecodeError(t *testing.T) { + testv := []string{ + // all kinds of opcodes to read memo but key is not there + "}g1\n.", + "}h\x01.", + "}j\x01\x02\x03\x04.", + + // invalid long format + "L123\n.", + "L12qL\n.", + } + for _, tt := range testv { + buf := bytes.NewBufferString(tt) + dec := NewDecoder(buf) + v, err := dec.Decode() + if !(v == nil && err != nil) { + t.Errorf("%q: no decode error ; got %#v, %#v", tt, v, err) + } + } +} + +func TestFuzzCrashers(t *testing.T) { + crashers := []string{ + "(dS''\n(lc\n\na2a2a22aasS''\na", + "S\n", + "((dd", + "}}}s", + "(((ld", + "(dS''\n(lp4\nsg4\n(s", + "}((tu", + "}((du", + 
"(c\n\nc\n\n\x85Rd", + "}(U\x040000u", + "(\x88d", + } + + for _, c := range crashers { + buf := bytes.NewBufferString(c) + dec := NewDecoder(buf) + dec.Decode() + } +} + +func BenchmarkDecode(b *testing.B) { + // prepare one large pickle stream from all test pickles + input := make([]byte, 0) + for _, test := range tests { + input = append(input, test.input...) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf := bytes.NewBuffer(input) + dec := NewDecoder(buf) + + j := 0 + for ; ; j++ { + _, err := dec.Decode() + if err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + + if j != len(tests) { + b.Fatalf("unexpected # of decode steps: got %v ; want %v", j, len(tests)) + } + } +} + +func BenchmarkEncode(b *testing.B) { + // prepare one large slice from all test vector values + input := make([]interface{}, 0) + approxOutSize := 0 + for _, test := range tests { + input = append(input, test.expected) + approxOutSize += len(test.input) + } + + buf := bytes.NewBuffer(make([]byte, approxOutSize)) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + buf.Reset() + enc := NewEncoder(buf) + err := enc.Encode(input) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/kisielk/whisper-go/whisper/whisper_test.go b/vendor/github.com/kisielk/whisper-go/whisper/whisper_test.go new file mode 100644 index 0000000000..b38dac44bc --- /dev/null +++ b/vendor/github.com/kisielk/whisper-go/whisper/whisper_test.go @@ -0,0 +1,335 @@ +package whisper + +import ( + "io/ioutil" + "os" + "testing" + "time" +) + +var ainfo = NewArchiveInfo + +func tempFileName() string { + f, err := ioutil.TempFile("", "whisper") + if err != nil { + panic(err) + } + f.Close() + os.Remove(f.Name()) + return f.Name() +} + +func TestQuantizeArchive(t *testing.T) { + points := archive{Point{0, 0}, Point{3, 0}, Point{10, 0}} + pointsOut := archive{Point{0, 0}, Point{2, 0}, Point{10, 0}} + quantizedPoints := quantizeArchive(points, 2) + for i := range quantizedPoints { + if quantizedPoints[i] != pointsOut[i] { + t.Errorf("%v != %v", quantizedPoints[i], pointsOut[i]) + } + } +} + +func TestQuantizePoint(t *testing.T) { + var pointTests = []struct { + in uint32 + resolution uint32 + out uint32 + }{ + {0, 2, 0}, + {3, 2, 2}, + } + + for i, tt := range pointTests { + q := quantize(tt.in, tt.resolution) + if q != tt.out { + t.Errorf("%d. 
quantizePoint(%q, %q) => %q, want %q", i, tt.in, tt.resolution, q, tt.out) + } + } +} + +func TestAggregate(t *testing.T) { + points := archive{Point{0, 0}, Point{0, 1}, Point{0, 2}, Point{0, 1}} + expected := Point{0, 1} + if p, err := aggregate(AggregationAverage, points); (p != expected) || (err != nil) { + t.Errorf("Average failed to average to %v, got %v: %v", expected, p, err) + } + + expected = Point{0, 4} + if p, err := aggregate(AggregationSum, points); (p != expected) || (err != nil) { + t.Errorf("Sum failed to aggregate to %v, got %v: %v", expected, p, err) + } + + expected = Point{0, 1} + if p, err := aggregate(AggregationLast, points); (p != expected) || (err != nil) { + t.Errorf("Last failed to aggregate to %v, got %v: %v", expected, p, err) + } + + expected = Point{0, 2} + if p, err := aggregate(AggregationMax, points); (p != expected) || (err != nil) { + t.Errorf("Max failed to aggregate to %v, got %v: %v", expected, p, err) + } + + expected = Point{0, 0} + if p, err := aggregate(AggregationMin, points); (p != expected) || (err != nil) { + t.Errorf("Min failed to aggregate to %v, got %v: %v", expected, p, err) + } + + if _, err := aggregate(1000, points); err == nil { + t.Errorf("No error for invalid aggregation") + } +} + +func TestParseArchiveInfo(t *testing.T) { + tests := map[string]ArchiveInfo{ + "60:1440": ArchiveInfo{0, 60, 1440}, // 60 seconds per datapoint, 1440 datapoints = 1 day of retention + "15m:8": ArchiveInfo{0, 15 * 60, 8}, // 15 minutes per datapoint, 8 datapoints = 2 hours of retention + "1h:7d": ArchiveInfo{0, 3600, 168}, // 1 hour per datapoint, 7 days of retention + "12h:2y": ArchiveInfo{0, 43200, 1456}, // 12 hours per datapoint, 2 years of retention + } + + for info, expected := range tests { + if a, err := ParseArchiveInfo(info); (a != expected) || (err != nil) { + t.Errorf("%s: %v != %v, %v", info, a, expected, err) + } + } + +} + +func TestWhisperAggregation(t *testing.T) { + filename := tempFileName() + defer os.Remove(filename) + options := DefaultCreateOptions() + options.AggregationMethod = AggregationMin + w, err := Create(filename, []ArchiveInfo{NewArchiveInfo(60, 60)}, options) + if err != nil { + t.Fatal("failed to create database:", err) + } + defer func() { + if err := w.Close(); err != nil { + t.Fatal("failed to close database:", err) + } + }() + + w.SetAggregationMethod(AggregationMax) + if method := w.Header.Metadata.AggregationMethod; method != AggregationMax { + t.Fatalf("AggregationMethod: %d, want %d", method, AggregationMax) + } +} + +func TestArchiveHeader(t *testing.T) { + filename := tempFileName() + defer os.Remove(filename) + + w, err := Create(filename, []ArchiveInfo{ainfo(1, 60), ainfo(60, 60)}, DefaultCreateOptions()) + if err != nil { + t.Fatal("failed to create database:", err) + } + + hSize := headerSize(2) + verifyHeader := func(w *Whisper) { + meta := w.Header.Metadata + expectedMeta := Metadata{AggregationAverage, 60 * 60, 0.5, 2} + if meta != expectedMeta { + t.Errorf("bad metadata, got %v want %v", meta, expectedMeta) + } + + archive0 := ArchiveInfo{hSize, 1, 60} + if w.Header.Archives[0] != archive0 { + t.Errorf("bad archive 0, got %v want %v", w.Header.Archives[0], archive0) + } + + archive1 := ArchiveInfo{hSize + pointSize*60, 60, 60} + if w.Header.Archives[1] != archive1 { + t.Errorf("bad archive 1, got %v want %v", w.Header.Archives[1], archive1) + } + } + + verifyHeader(w) + if err := w.Close(); err != nil { + t.Fatal("failed to close database:", err) + } + + w, err = Open(filename) + if err != nil { + 
t.Fatal("failed to open database:", err) + } + verifyHeader(w) + if err := w.Close(); err != nil { + t.Fatal("failed to close database:", err) + } +} + +func TestFetch(t *testing.T) { + filename := tempFileName() + defer os.Remove(filename) + + const ( + step = 60 + nPoints = 100 + ) + + w, err := Create(filename, []ArchiveInfo{NewArchiveInfo(step, nPoints)}, DefaultCreateOptions()) + if err != nil { + t.Fatal("failed to create database:", err) + } + defer func() { + if err := w.Close(); err != nil { + t.Fatal("failed to close database:", err) + } + }() + + points := make([]Point, nPoints) + now := time.Now() + for i := 0; i < nPoints; i++ { + points[i] = NewPoint(now.Add(-time.Duration(nPoints-1-i)*time.Minute), float64(i)) + } + err = w.UpdateMany(points) + if err != nil { + t.Fatal("failed to update points:", err) + } + + _, fetchedPoints, err := w.FetchUntil(1, 0) + if err == nil { + t.Fatal("no error from nonsensical fetch, fetched", fetchedPoints) + } + + _, fetchedPoints, err = w.Fetch(0) + if err != nil { + t.Fatal("error fetching points:", err) + } + if len(fetchedPoints) != nPoints { + t.Fatalf("got %d points, want %d", len(fetchedPoints), nPoints) + } + for i := range fetchedPoints { + point := points[i] + point.Timestamp = quantize(point.Timestamp, step) + if fetchedPoints[i] != point { + t.Errorf("point %d: got %v, want %v", i, fetchedPoints[i], point) + } + } +} + +// TestMaxRetention tests the behaviour of an archive's maximum retenetion. +func TestMaxRetention(t *testing.T) { + filename := tempFileName() + defer os.Remove(filename) + + w, err := Create(filename, []ArchiveInfo{NewArchiveInfo(60, 10)}, DefaultCreateOptions()) + if err != nil { + t.Fatal("failed to create database:", err) + } + defer func() { + if err := w.Close(); err != nil { + t.Fatal("failed to close database:", err) + } + }() + + invalid := NewPoint(time.Now().Add(-11*time.Minute), 0) + if err = w.Update(invalid); err == nil { + t.Fatal("invalid point did not return an error") + } + valid := NewPoint(time.Now().Add(-9*time.Minute), 0) + if err = w.Update(valid); err != nil { + t.Fatalf("valid point returned an error: %s", err) + } +} + +func TestCreateTwice(t *testing.T) { + filename := tempFileName() + archiveInfos := []ArchiveInfo{NewArchiveInfo(60, 10)} + defer os.Remove(filename) + + w, err := Create(filename, archiveInfos, DefaultCreateOptions()) + if err != nil { + t.Fatal("failed to create database:", err) + } + if err := w.Close(); err != nil { + t.Fatal("failed to close database:", err) + } + + _, err = Create(filename, archiveInfos, DefaultCreateOptions()) + if err == nil { + t.Fatal("no error when attempting to overwrite database") + } +} + +func TestValidateArchiveList(t *testing.T) { + tests := []struct { + Archives []ArchiveInfo + Error error + }{ + {[]ArchiveInfo{}, ErrNoArchives}, + {[]ArchiveInfo{ainfo(10, 10), ainfo(10, 5)}, ErrDuplicateArchive}, + {[]ArchiveInfo{ainfo(2, 5), ainfo(3, 5)}, ErrUnevenPrecision}, + {[]ArchiveInfo{ainfo(10, 6), ainfo(5, 13)}, ErrLowRetention}, + {[]ArchiveInfo{ainfo(10, 6), ainfo(70, 10)}, ErrInsufficientPoints}, + {[]ArchiveInfo{ainfo(2, 5), ainfo(4, 10), ainfo(8, 20)}, nil}, + + // The following tests adapted from test_whisper.py + {[]ArchiveInfo{ainfo(1, 60), ainfo(60, 60)}, nil}, + {[]ArchiveInfo{ainfo(1, 60), ainfo(60, 60), ainfo(1, 60)}, ErrDuplicateArchive}, + {[]ArchiveInfo{ainfo(60, 60), ainfo(6, 60)}, nil}, + {[]ArchiveInfo{ainfo(60, 60), ainfo(7, 60)}, ErrUnevenPrecision}, + {[]ArchiveInfo{ainfo(1, 60), ainfo(10, 1)}, ErrLowRetention}, + 
+		{[]ArchiveInfo{ainfo(1, 30), ainfo(60, 60)}, ErrInsufficientPoints},
+	}
+
+	for i, test := range tests {
+		if err := validateArchiveList(test.Archives); err != test.Error {
+			t.Errorf("%d: got: %v, want: %v", i, err, test.Error)
+		}
+	}
+}
+
+// Test that values are aggregated correctly when rolling up into a lower archive
+func TestArchiveRollup(t *testing.T) {
+	filename := tempFileName()
+	defer os.Remove(filename)
+
+	options := DefaultCreateOptions()
+	options.AggregationMethod = AggregationSum
+	ai1, err := ParseArchiveInfo("5s:1m")
+	if err != nil {
+		t.Fatal(err)
+	}
+	ai2, err := ParseArchiveInfo("10s:2m")
+	if err != nil {
+		t.Fatal(err)
+	}
+	w, err := Create(filename, []ArchiveInfo{ai1, ai2}, options)
+	if err != nil {
+		t.Fatal("failed to create database:", err)
+	}
+	defer func() {
+		if err := w.Close(); err != nil {
+			t.Fatal("failed to close database:", err)
+		}
+	}()
+
+	nPoints := 5
+	points := make([]Point, nPoints)
+	now := time.Now()
+	for i := 0; i < nPoints; i++ {
+		points[i] = NewPoint(now.Add(-time.Duration((nPoints-i)*5)*time.Second), float64(1))
+	}
+	err = w.UpdateMany(points)
+	if err != nil {
+		t.Fatal("failed to update points:", err)
+	}
+
+	oneCount := 0
+	twoCount := 0
+
+	dump, err := w.DumpArchive(1)
+	if err != nil {
+		t.Fatal("failed to read archive:", err)
+	}
+
+	for _, point := range dump {
+		switch point.Value {
+		case 1:
+			oneCount++
+		case 2:
+			twoCount++
+		}
+	}
+	if oneCount != 1 || twoCount != 2 {
+		t.Fatal("archive rollup produced unexpected values")
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/.gitignore b/vendor/github.com/klauspost/compress/.gitignore
new file mode 100644
index 0000000000..daf913b1b3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/klauspost/compress/.travis.yml b/vendor/github.com/klauspost/compress/.travis.yml
new file mode 100644
index 0000000000..182d38ac4e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/.travis.yml
@@ -0,0 +1,24 @@
+language: go
+
+sudo: false
+
+os:
+  - linux
+  - osx
+
+go:
+  - 1.4
+  - 1.5
+  - 1.6
+  - 1.7
+  - tip
+
+install:
+  - go get -t ./...
+
+script:
+  - diff <(gofmt -d .) <(printf "")
+  - go test -v -cpu=2 ./...
+  - go test -cpu=2 -tags=noasm ./...
+  - go test -cpu=1,2,4 -short -race ./...
+  - go test -cpu=2,4 -short -race -tags=noasm ./...
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
new file mode 100644
index 0000000000..bbf7f6e3c5
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -0,0 +1,290 @@
+# compress
+
+This package is based on an optimized Deflate function, which is used by the gzip/zip/zlib packages.
+
+It offers slightly better compression at lower compression settings, and up to 3x faster encoding at the highest compression level.
+
+* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
+* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)
+
+[![Build Status](https://travis-ci.org/klauspost/compress.svg?branch=master)](https://travis-ci.org/klauspost/compress)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
+
+# changelog
+* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625).
+* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before.
+* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update.
+* Oct 16, 2016: Go 1.7 changes merged. Apples to apples this package is a few percent faster, but has a significantly better balance between speed and compression per level.
+* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base 64 encoded data compression.
+* Mar 24, 2016: Small speedup for level 1-3.
+* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
+* Feb 19, 2016: Handle small payloads faster in level 1-3.
+* Feb 19, 2016: Added faster level 2 + 3 compression modes.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
+* Feb 14, 2016: Snappy: Merge upstream changes.
+* Feb 14, 2016: Snappy: Fix aggressive skipping.
+* Feb 14, 2016: Snappy: Update benchmark.
+* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression.
+* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%.
+* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content.
+* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup.
+* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression.
+* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives.
+* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs.
+* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms.
+* Dec 8 2015: Fixed rare [one byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update!
+* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet).
+* Nov 20 2015: Small optimization to bit writer on 64 bit systems.
+* Nov 17 2015: Fixed out-of-bound errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15).
+* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
+* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file
+* Oct 15 2015: Added skipping on uncompressible data. Random data speed up >5x.
+
+# usage
+
+The packages are drop-in replacements for the standard libraries. Simply replace the import path to use them:
+
+| old import         | new import                               |
+|--------------------|------------------------------------------|
+| `compress/gzip`    | `github.com/klauspost/compress/gzip`     |
+| `compress/zlib`    | `github.com/klauspost/compress/zlib`     |
+| `archive/zip`      | `github.com/klauspost/compress/zip`      |
+| `compress/deflate` | `github.com/klauspost/compress/deflate`  |
+
+You may also be interested in [pgzip](https://github.com/klauspost/pgzip), a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages.
+
+The packages contain the same functionality as the standard library, so you can use its godoc for reference: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/).
+
+Currently there is only a minor speedup on decompression (mostly CRC32 calculation).
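+As a minimal sketch of the drop-in usage (our illustration, not from the upstream README; it assumes only the standard `compress/gzip` API, which this package mirrors):
+
+```go
+package main
+
+import (
+	"io"
+	"os"
+
+	gzip "github.com/klauspost/compress/gzip" // was: "compress/gzip"
+)
+
+func main() {
+	// Same API as the standard library; only the import path changed.
+	w, err := gzip.NewWriterLevel(os.Stdout, gzip.BestSpeed)
+	if err != nil {
+		panic(err)
+	}
+	defer w.Close()
+	if _, err := io.Copy(w, os.Stdin); err != nil {
+		panic(err)
+	}
+}
+```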
+
+# deflate optimizations
+
+* Minimum matches are 4 bytes, this leads to fewer searches and better compression. (In Go 1.7)
+* Stronger hash (iSCSI CRC32) for matches on x64 with SSE 4.2 support. This leads to fewer hash collisions. (Go 1.7 also has improved hashes)
+* Literal byte matching using SSE 4.2 for faster match comparisons. (not in Go)
+* Bulk hashing on matches. (In Go 1.7)
+* Much faster dictionary indexing with `NewWriterDict()`/`Reset()`. (In Go 1.7)
+* Make the bit coder faster by assuming we are on a 64 bit CPU. (In Go 1.7)
+* Level 1 compression replaced by converted "Snappy" algorithm. (In Go 1.7)
+* Uncompressible content is detected and skipped faster. (Only in BestSpeed in Go)
+* A lot of branching eliminated by having two encoders for levels 4-6 and 7-9. (not in Go)
+* All heap memory allocations eliminated. (In Go 1.7)
+
+```
+benchmark                              old ns/op    new ns/op    delta
+BenchmarkEncodeDigitsSpeed1e4-4        554029       265175       -52.14%
+BenchmarkEncodeDigitsSpeed1e5-4        3908558      2416595      -38.17%
+BenchmarkEncodeDigitsSpeed1e6-4        37546692     24875330     -33.75%
+BenchmarkEncodeDigitsDefault1e4-4      781510       486322       -37.77%
+BenchmarkEncodeDigitsDefault1e5-4      15530248     6740175      -56.60%
+BenchmarkEncodeDigitsDefault1e6-4      174915710    76498625     -56.27%
+BenchmarkEncodeDigitsCompress1e4-4     769995       485652       -36.93%
+BenchmarkEncodeDigitsCompress1e5-4     15450113     6929589      -55.15%
+BenchmarkEncodeDigitsCompress1e6-4     175114660    73348495     -58.11%
+BenchmarkEncodeTwainSpeed1e4-4         560122       275977       -50.73%
+BenchmarkEncodeTwainSpeed1e5-4         3740978      2506095      -33.01%
+BenchmarkEncodeTwainSpeed1e6-4         35542802     21904440     -38.37%
+BenchmarkEncodeTwainDefault1e4-4       828534       549026       -33.74%
+BenchmarkEncodeTwainDefault1e5-4       13667153     7528455      -44.92%
+BenchmarkEncodeTwainDefault1e6-4       141191770    79952170     -43.37%
+BenchmarkEncodeTwainCompress1e4-4      830050       545694       -34.26%
+BenchmarkEncodeTwainCompress1e5-4      16620852     8460600      -49.10%
+BenchmarkEncodeTwainCompress1e6-4      193326820    90808750     -53.03%
+
+benchmark                              old MB/s     new MB/s     speedup
+BenchmarkEncodeDigitsSpeed1e4-4        18.05        37.71        2.09x
+BenchmarkEncodeDigitsSpeed1e5-4        25.58        41.38        1.62x
+BenchmarkEncodeDigitsSpeed1e6-4        26.63        40.20        1.51x
+BenchmarkEncodeDigitsDefault1e4-4      12.80        20.56        1.61x
+BenchmarkEncodeDigitsDefault1e5-4      6.44         14.84        2.30x
+BenchmarkEncodeDigitsDefault1e6-4      5.72         13.07        2.28x
+BenchmarkEncodeDigitsCompress1e4-4     12.99        20.59        1.59x
+BenchmarkEncodeDigitsCompress1e5-4     6.47         14.43        2.23x
+BenchmarkEncodeDigitsCompress1e6-4     5.71         13.63        2.39x
+BenchmarkEncodeTwainSpeed1e4-4         17.85        36.23        2.03x
+BenchmarkEncodeTwainSpeed1e5-4         26.73        39.90        1.49x
+BenchmarkEncodeTwainSpeed1e6-4         28.14        45.65        1.62x
+BenchmarkEncodeTwainDefault1e4-4       12.07        18.21        1.51x
+BenchmarkEncodeTwainDefault1e5-4       7.32         13.28        1.81x
+BenchmarkEncodeTwainDefault1e6-4       7.08         12.51        1.77x
+BenchmarkEncodeTwainCompress1e4-4      12.05        18.33        1.52x
+BenchmarkEncodeTwainCompress1e5-4      6.02         11.82        1.96x
+BenchmarkEncodeTwainCompress1e6-4      5.17         11.01        2.13x
+```
+* "Speed" is compression level 1
+* "Default" is compression level 6
+* "Compress" is compression level 9
+* Test files are [Digits](https://github.com/klauspost/compress/blob/master/testdata/e.txt) (no matches) and [Twain](https://github.com/klauspost/compress/blob/master/testdata/Mark.Twain-Tom.Sawyer.txt) (plain text).
+
+As can be seen, this shows a very good speedup across the board.
+
+`Twain` is a much more realistic benchmark, and will be closer to JSON/HTML performance. Here speed is equivalent or faster, up to 2 times.
+
+**Without assembly**.
+This is what you can expect on systems that do not have amd64 and SSE 4.2:
+```
+benchmark                              old ns/op    new ns/op    delta
+BenchmarkEncodeDigitsSpeed1e4-4        554029       249558       -54.96%
+BenchmarkEncodeDigitsSpeed1e5-4        3908558      2295216      -41.28%
+BenchmarkEncodeDigitsSpeed1e6-4        37546692     22594905     -39.82%
+BenchmarkEncodeDigitsDefault1e4-4      781510       579850       -25.80%
+BenchmarkEncodeDigitsDefault1e5-4      15530248     10096561     -34.99%
+BenchmarkEncodeDigitsDefault1e6-4      174915710    111470780    -36.27%
+BenchmarkEncodeDigitsCompress1e4-4     769995       579708       -24.71%
+BenchmarkEncodeDigitsCompress1e5-4     15450113     10266373     -33.55%
+BenchmarkEncodeDigitsCompress1e6-4     175114660    110170120    -37.09%
+BenchmarkEncodeTwainSpeed1e4-4         560122       260679       -53.46%
+BenchmarkEncodeTwainSpeed1e5-4         3740978      2097372      -43.94%
+BenchmarkEncodeTwainSpeed1e6-4         35542802     20353449     -42.74%
+BenchmarkEncodeTwainDefault1e4-4       828534       646016       -22.03%
+BenchmarkEncodeTwainDefault1e5-4       13667153     10056369     -26.42%
+BenchmarkEncodeTwainDefault1e6-4       141191770    105268770    -25.44%
+BenchmarkEncodeTwainCompress1e4-4      830050       642401       -22.61%
+BenchmarkEncodeTwainCompress1e5-4      16620852     11157081     -32.87%
+BenchmarkEncodeTwainCompress1e6-4      193326820    121780770    -37.01%
+
+benchmark                              old MB/s     new MB/s     speedup
+BenchmarkEncodeDigitsSpeed1e4-4        18.05        40.07        2.22x
+BenchmarkEncodeDigitsSpeed1e5-4        25.58        43.57        1.70x
+BenchmarkEncodeDigitsSpeed1e6-4        26.63        44.26        1.66x
+BenchmarkEncodeDigitsDefault1e4-4      12.80        17.25        1.35x
+BenchmarkEncodeDigitsDefault1e5-4      6.44         9.90         1.54x
+BenchmarkEncodeDigitsDefault1e6-4      5.72         8.97         1.57x
+BenchmarkEncodeDigitsCompress1e4-4     12.99        17.25        1.33x
+BenchmarkEncodeDigitsCompress1e5-4     6.47         9.74         1.51x
+BenchmarkEncodeDigitsCompress1e6-4     5.71         9.08         1.59x
+BenchmarkEncodeTwainSpeed1e4-4         17.85        38.36        2.15x
+BenchmarkEncodeTwainSpeed1e5-4         26.73        47.68        1.78x
+BenchmarkEncodeTwainSpeed1e6-4         28.14        49.13        1.75x
+BenchmarkEncodeTwainDefault1e4-4       12.07        15.48        1.28x
+BenchmarkEncodeTwainDefault1e5-4       7.32         9.94         1.36x
+BenchmarkEncodeTwainDefault1e6-4       7.08         9.50         1.34x
+BenchmarkEncodeTwainCompress1e4-4      12.05        15.57        1.29x
+BenchmarkEncodeTwainCompress1e5-4      6.02         8.96         1.49x
+BenchmarkEncodeTwainCompress1e6-4      5.17         8.21         1.59x
+```
+So even without the assembly optimizations there is a general speedup across the board.
+
+## level 1-3 "snappy" compression
+
+Levels 1 "Best Speed", 2 and 3 are completely replaced by a converted version of the algorithm found in Snappy, modified to be fully
+compatible with the deflate bitstream (and thus still compatible with all existing zlib/gzip libraries and tools).
+This version is considerably faster than the "old" deflate at level 1. It does however come at a compression loss, usually on the order of 3-4% compared to the old level 1. However, the speed is usually 1.75 times that of the fastest deflate mode.
+
+In my previous experiments the most common case for "level 1" was that it provided no significant speedup, only lower compression compared to level 2 and sometimes even 3. However, the modified Snappy algorithm provides a very good sweet spot. Usually about 75% faster and with only a little compression loss. Therefore I decided to *replace* level 1 with this mode entirely.
+
+Input is split into blocks of 64kb, and they are encoded independently (no backreferences across blocks) for the best speed. Contrary to Snappy the output is entropy-encoded, so you will almost always see better compression than Snappy. But plain Snappy is still about twice as fast as this Snappy-in-deflate mode.
+
+Level 2 and 3 have also been replaced: level 2 is capable of matching between blocks, and level 3 checks up to two hash candidates when looking for matches.
+
+## compression levels
+
+This table shows the compression at each level, and the percentage of the output size compared to output
+at the similar level with the standard library. Compression data is `Twain`, see above.
+
+(Not up-to-date after rebalancing)
+
+| Level | Bytes  | % size |
+|-------|--------|--------|
+| 1     | 194622 | 103.7% |
+| 2     | 174684 | 96.85% |
+| 3     | 170301 | 98.45% |
+| 4     | 165253 | 97.69% |
+| 5     | 161274 | 98.65% |
+| 6     | 160464 | 99.71% |
+| 7     | 160304 | 99.87% |
+| 8     | 160279 | 99.99% |
+| 9     | 160279 | 99.99% |
+
+To interpret an example: this version of deflate compresses an input of 407287 bytes to 161274 bytes at level 5, which is 98.65% of the size of what the standard library produces.
+
+This means that from level 4 you can expect a compression improvement of a few percent. Level 1 is about 3% worse, as described above.
+
+# linear time compression (huffman only)
+
+This compression library adds a special compression level, named `ConstantCompression`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character.
+
+This means that often used characters, like 'e' and ' ' (space) in text, use the fewest bits to represent, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
+
+Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
+
+The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
+
+The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and the encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
+
+For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
+
+This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
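+
+A minimal sketch of using this mode (our illustration, not from the upstream README; it assumes the `ConstantCompression` level constant is exposed by the `flate` sub-package, consistent with the "level -2" changelog entries above):
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/klauspost/compress/flate"
+)
+
+func main() {
+	// ConstantCompression disables matching entirely and only Huffman-encodes
+	// the input, which is what makes it near linear time.
+	w, err := flate.NewWriter(os.Stdout, flate.ConstantCompression)
+	if err != nil {
+		panic(err)
+	}
+	defer w.Close()
+	w.Write([]byte("hello hello hello"))
+}
+```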
+
+# gzip/zip optimizations
+ * Uses the faster deflate
+ * Uses SSE 4.2 CRC32 calculations.
+
+Speed increase is up to 3x that of the standard library, but usually around 2x.
+
+This is as close to a real world benchmark as you will get: a 2.3MB JSON file.
+(NOTE: not up-to-date)
+
+```
+benchmark           old ns/op     new ns/op     delta
+BenchmarkGzipL1-4   95212470      59938275      -37.05%
+BenchmarkGzipL2-4   102069730     76349195      -25.20%
+BenchmarkGzipL3-4   115472770     82492215      -28.56%
+BenchmarkGzipL4-4   153197780     107570890     -29.78%
+BenchmarkGzipL5-4   203930260     134387930     -34.10%
+BenchmarkGzipL6-4   233172100     145495400     -37.60%
+BenchmarkGzipL7-4   297190260     197926950     -33.40%
+BenchmarkGzipL8-4   512819750     376244733     -26.63%
+BenchmarkGzipL9-4   563366800     403266833     -28.42%
+
+benchmark           old MB/s     new MB/s     speedup
+BenchmarkGzipL1-4   52.11        82.78        1.59x
+BenchmarkGzipL2-4   48.61        64.99        1.34x
+BenchmarkGzipL3-4   42.97        60.15        1.40x
+BenchmarkGzipL4-4   32.39        46.13        1.42x
+BenchmarkGzipL5-4   24.33        36.92        1.52x
+BenchmarkGzipL6-4   21.28        34.10        1.60x
+BenchmarkGzipL7-4   16.70        25.07        1.50x
+BenchmarkGzipL8-4   9.68         13.19        1.36x
+BenchmarkGzipL9-4   8.81         12.30        1.40x
+```
+
+Multithreaded compression comparison using [pgzip](https://github.com/klauspost/pgzip), quad core, CPU = 8:
+
+(Not updated, old numbers)
+
+```
+benchmark         old ns/op     new ns/op     delta
+BenchmarkGzipL1   96155500      25981486      -72.98%
+BenchmarkGzipL2   101905830     24601408      -75.86%
+BenchmarkGzipL3   113506490     26321506      -76.81%
+BenchmarkGzipL4   143708220     31761818      -77.90%
+BenchmarkGzipL5   188210770     39602266      -78.96%
+BenchmarkGzipL6   209812000     40402313      -80.74%
+BenchmarkGzipL7   270015440     56103210      -79.22%
+BenchmarkGzipL8   461359700     91255220      -80.22%
+BenchmarkGzipL9   498361833     88755075      -82.19%
+
+benchmark         old MB/s     new MB/s     speedup
+BenchmarkGzipL1   51.60        190.97       3.70x
+BenchmarkGzipL2   48.69        201.69       4.14x
+BenchmarkGzipL3   43.71        188.51       4.31x
+BenchmarkGzipL4   34.53        156.22       4.52x
+BenchmarkGzipL5   26.36        125.29       4.75x
+BenchmarkGzipL6   23.65        122.81       5.19x
+BenchmarkGzipL7   18.38        88.44        4.81x
+BenchmarkGzipL8   10.75        54.37        5.06x
+BenchmarkGzipL9   9.96         55.90        5.61x
+```
+
+# snappy package
+
+The standard snappy package has now been improved. This repo contains a copy of the snappy repo.
+
+I would advise using the standard package: https://github.com/golang/snappy
+
+
+# license
+
+This code is licensed under the same conditions as the original Go code. See LICENSE file.
diff --git a/vendor/github.com/klauspost/compress/flate/asm_test.go b/vendor/github.com/klauspost/compress/flate/asm_test.go
new file mode 100644
index 0000000000..40bf210f5f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/asm_test.go
@@ -0,0 +1,193 @@
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+//+build amd64
+
+package flate
+
+import (
+	"math/rand"
+	"testing"
+)
+
+func TestCRC(t *testing.T) {
+	if !useSSE42 {
+		t.Skip("Skipping CRC test, no SSE 4.2 available")
+	}
+	for _, x := range deflateTests {
+		y := x.out
+		if len(y) >= minMatchLength {
+			t.Logf("In: %v, Out:0x%08x", y[0:minMatchLength], crc32sse(y[0:minMatchLength]))
+		}
+	}
+}
+
+func TestCRCBulk(t *testing.T) {
+	if !useSSE42 {
+		t.Skip("Skipping CRC test, no SSE 4.2 available")
+	}
+	for _, x := range deflateTests {
+		y := x.out
+		y = append(y, y...)
+		y = append(y, y...)
+		y = append(y, y...)
+		y = append(y, y...)
+		y = append(y, y...)
+		y = append(y, y...)
+		if !testing.Short() {
+			y = append(y, y...)
+			y = append(y, y...)
+		}
+		y = append(y, 1)
+		if len(y) >= minMatchLength {
+			for j := len(y) - 1; j >= 4; j-- {
+
+				// Create a copy, so we can more easily detect out-of-bound reads
+				test := make([]byte, j)
+				test2 := make([]byte, j)
+				copy(test, y[:j])
+				copy(test2, y[:j])
+
+				// We allocate one more than we need to test for unintentional overwrites
+				dst := make([]uint32, j-3+1)
+				ref := make([]uint32, j-3+1)
+				for i := range dst {
+					dst[i] = uint32(i + 100)
+					ref[i] = uint32(i + 101)
+				}
+				// Last entry must NOT be overwritten.
+				dst[j-3] = 0x1234
+				ref[j-3] = 0x1234
+
+				// Do two encodes we can compare
+				crc32sseAll(test, dst)
+				crc32sseAll(test2, ref)
+
+				// Check all values
+				for i, got := range dst {
+					if i == j-3 {
+						if dst[i] != 0x1234 {
+							t.Fatalf("end of expected dst overwritten, was %08x", uint32(dst[i]))
+						}
+						continue
+					}
+					expect := crc32sse(y[i : i+4])
+					if got != expect && got == uint32(i)+100 {
+						t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, uint32(expect))
+					} else if got != expect {
+						t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, uint32(got), uint32(expect))
+					}
+					expect = ref[i]
+					if got != expect {
+						t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect)
+					}
+				}
+			}
+		}
+	}
+}
+
+func TestMatchLen(t *testing.T) {
+	if !useSSE42 {
+		t.Skip("Skipping Matchlen test, no SSE 4.2 available")
+	}
+	// Maximum length tested
+	var maxLen = 512
+
+	// Skips per iteration
+	is, js, ks := 3, 2, 1
+	if testing.Short() {
+		is, js, ks = 7, 5, 3
+	}
+
+	a := make([]byte, maxLen)
+	b := make([]byte, maxLen)
+	bb := make([]byte, maxLen)
+	rand.Seed(1)
+	for i := range a {
+		a[i] = byte(rand.Int63())
+		b[i] = byte(rand.Int63())
+	}
+
+	// Test different lengths
+	for i := 0; i < maxLen; i += is {
+		// Test different dst offsets.
+		for j := 0; j < maxLen-1; j += js {
+			copy(bb, b)
+			// Test different src offsets
+			for k := i - 1; k >= 0; k -= ks {
+				copy(bb[j:], a[k:i])
+				maxTest := maxLen - j
+				if maxTest > maxLen-k {
+					maxTest = maxLen - k
+				}
+				got := matchLenSSE4(a[k:], bb[j:], maxTest)
+				expect := matchLenReference(a[k:], bb[j:], maxTest)
+				if got > maxTest || got < 0 {
+					t.Fatalf("unexpected result %d (len:%d, src offset: %d, dst offset:%d)", got, maxTest, k, j)
+				}
+				if got != expect {
+					t.Fatalf("Mismatch, expected %d, got %d", expect, got)
+				}
+			}
+		}
+	}
+}
+
+// matchLenReference is a reference matcher.
+func matchLenReference(a, b []byte, max int) int {
+	for i := 0; i < max; i++ {
+		if a[i] != b[i] {
+			return i
+		}
+	}
+	return max
+}
+
+func TestHistogram(t *testing.T) {
+	if !useSSE42 {
+		t.Skip("Skipping Histogram test, no SSE 4.2 available")
+	}
+	// Maximum length tested
+	const maxLen = 65536
+	var maxOff = 8
+
+	// Skips per iteration
+	is, js := 5, 3
+	if testing.Short() {
+		is, js = 9, 1
+		maxOff = 1
+	}
+
+	a := make([]byte, maxLen+maxOff)
+	rand.Seed(1)
+	for i := range a {
+		a[i] = byte(rand.Int63())
+	}
+
+	// Test different lengths
+	for i := 0; i <= maxLen; i += is {
+		// Test different offsets
+		for j := 0; j < maxOff; j += js {
+			var got [256]int32
+			var reference [256]int32
+
+			histogram(a[j:i+j], got[:])
+			histogramReference(a[j:i+j], reference[:])
+			for k := range got {
+				if got[k] != reference[k] {
+					t.Fatalf("mismatch at len:%d, offset:%d, value %d: (got) %d != %d (expected)", i, j, k, got[k], reference[k])
+				}
+			}
+		}
+	}
+}
+
+// histogramReference is a reference histogram builder.
+func histogramReference(b []byte, h []int32) {
+	if len(h) < 256 {
+		panic("Histogram too small")
+	}
+	for _, t := range b {
+		h[t]++
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/copy_test.go b/vendor/github.com/klauspost/compress/flate/copy_test.go
new file mode 100644
index 0000000000..2011b1547c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/copy_test.go
@@ -0,0 +1,54 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+	"testing"
+)
+
+func TestForwardCopy(t *testing.T) {
+	testCases := []struct {
+		dst0, dst1 int
+		src0, src1 int
+		want       string
+	}{
+		{0, 9, 0, 9, "012345678"},
+		{0, 5, 4, 9, "45678"},
+		{4, 9, 0, 5, "01230"},
+		{1, 6, 3, 8, "34567"},
+		{3, 8, 1, 6, "12121"},
+		{0, 9, 3, 6, "345"},
+		{3, 6, 0, 9, "012"},
+		{1, 6, 0, 9, "00000"},
+		{0, 4, 7, 8, "7"},
+		{0, 1, 6, 8, "6"},
+		{4, 4, 6, 9, ""},
+		{2, 8, 6, 6, ""},
+		{0, 0, 0, 0, ""},
+	}
+	for _, tc := range testCases {
+		b := []byte("0123456789")
+		n := tc.dst1 - tc.dst0
+		if tc.src1-tc.src0 < n {
+			n = tc.src1 - tc.src0
+		}
+		forwardCopy(b, tc.dst0, tc.src0, n)
+		got := string(b[tc.dst0 : tc.dst0+n])
+		if got != tc.want {
+			t.Errorf("dst=b[%d:%d], src=b[%d:%d]: got %q, want %q",
+				tc.dst0, tc.dst1, tc.src0, tc.src1, got, tc.want)
+		}
+		// Check that the bytes outside of dst[:n] were not modified.
+		for i, x := range b {
+			if i >= tc.dst0 && i < tc.dst0+n {
+				continue
+			}
+			if int(x) != '0'+i {
+				t.Errorf("dst=b[%d:%d], src=b[%d:%d]: copy overrun at b[%d]: got '%c', want '%c'",
+					tc.dst0, tc.dst1, tc.src0, tc.src1, i, x, '0'+i)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/deflate_test.go b/vendor/github.com/klauspost/compress/flate/deflate_test.go
new file mode 100644
index 0000000000..ff62a607af
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate_test.go
@@ -0,0 +1,648 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package flate + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + "sync" + "testing" +) + +type deflateTest struct { + in []byte + level int + out []byte +} + +type deflateInflateTest struct { + in []byte +} + +type reverseBitsTest struct { + in uint16 + bitCount uint8 + out uint16 +} + +var deflateTests = []*deflateTest{ + {[]byte{}, 0, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + + {[]byte{0x11}, 0, []byte{0, 1, 0, 254, 255, 17, 1, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, 0, []byte{0, 2, 0, 253, 255, 17, 18, 1, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 0, + []byte{0, 8, 0, 247, 255, 17, 17, 17, 17, 17, 17, 17, 17, 1, 0, 0, 255, 255}, + }, + {[]byte{}, 1, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, BestCompression, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, BestCompression, []byte{18, 20, 2, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, BestCompression, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}}, + {[]byte{}, 9, []byte{1, 0, 0, 255, 255}}, + {[]byte{0x11}, 9, []byte{18, 4, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x12}, 9, []byte{18, 20, 2, 4, 0, 0, 255, 255}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}, 9, []byte{18, 132, 2, 64, 0, 0, 0, 255, 255}}, +} + +var deflateInflateTests = []*deflateInflateTest{ + {[]byte{}}, + {[]byte{0x11}}, + {[]byte{0x11, 0x12}}, + {[]byte{0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, 0x11}}, + {[]byte{0x11, 0x10, 0x13, 0x41, 0x21, 0x21, 0x41, 0x13, 0x87, 0x78, 0x13}}, + {largeDataChunk()}, +} + +var reverseBitsTests = []*reverseBitsTest{ + {1, 1, 1}, + {1, 2, 2}, + {1, 3, 4}, + {1, 4, 8}, + {1, 5, 16}, + {17, 5, 17}, + {257, 9, 257}, + {29, 5, 23}, +} + +func largeDataChunk() []byte { + result := make([]byte, 100000) + for i := range result { + result[i] = byte(i * i & 0xFF) + } + return result +} + +func TestCRCBulkOld(t *testing.T) { + for _, x := range deflateTests { + y := x.out + if len(y) >= minMatchLength { + y = append(y, y...) + for j := 4; j < len(y); j++ { + y := y[:j] + dst := make([]uint32, len(y)-minMatchLength+1) + for i := range dst { + dst[i] = uint32(i + 100) + } + bulkHash4(y, dst) + for i, val := range dst { + got := val + expect := hash4(y[i:]) + if got != expect && got == uint32(i)+100 { + t.Errorf("Len:%d Index:%d, expected 0x%08x but not modified", len(y), i, expect) + } else if got != expect { + t.Errorf("Len:%d Index:%d, got 0x%08x expected:0x%08x", len(y), i, got, expect) + } else { + //t.Logf("Len:%d Index:%d OK (0x%08x)", len(y), i, got) + } + } + } + } + } +} + +func TestDeflate(t *testing.T) { + for _, h := range deflateTests { + var buf bytes.Buffer + w, err := NewWriter(&buf, h.level) + if err != nil { + t.Errorf("NewWriter: %v", err) + continue + } + w.Write(h.in) + w.Close() + if !bytes.Equal(buf.Bytes(), h.out) { + t.Errorf("Deflate(%d, %x) = \n%#v, want \n%#v", h.level, h.in, buf.Bytes(), h.out) + } + } +} + +// A sparseReader returns a stream consisting of 0s followed by 1<<16 1s. +// This tests missing hash references in a very large input. 
+type sparseReader struct { + l int64 + cur int64 +} + +func (r *sparseReader) Read(b []byte) (n int, err error) { + if r.cur >= r.l { + return 0, io.EOF + } + n = len(b) + cur := r.cur + int64(n) + if cur > r.l { + n -= int(cur - r.l) + cur = r.l + } + for i := range b[0:n] { + if r.cur+int64(i) >= r.l-1<<16 { + b[i] = 1 + } else { + b[i] = 0 + } + } + r.cur = cur + return +} + +func TestVeryLongSparseChunk(t *testing.T) { + if testing.Short() { + t.Skip("skipping sparse chunk during short test") + } + w, err := NewWriter(ioutil.Discard, 1) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + if _, err = io.Copy(w, &sparseReader{l: 23E8}); err != nil { + t.Errorf("Compress failed: %v", err) + return + } +} + +type syncBuffer struct { + buf bytes.Buffer + mu sync.RWMutex + closed bool + ready chan bool +} + +func newSyncBuffer() *syncBuffer { + return &syncBuffer{ready: make(chan bool, 1)} +} + +func (b *syncBuffer) Read(p []byte) (n int, err error) { + for { + b.mu.RLock() + n, err = b.buf.Read(p) + b.mu.RUnlock() + if n > 0 || b.closed { + return + } + <-b.ready + } +} + +func (b *syncBuffer) signal() { + select { + case b.ready <- true: + default: + } +} + +func (b *syncBuffer) Write(p []byte) (n int, err error) { + n, err = b.buf.Write(p) + b.signal() + return +} + +func (b *syncBuffer) WriteMode() { + b.mu.Lock() +} + +func (b *syncBuffer) ReadMode() { + b.mu.Unlock() + b.signal() +} + +func (b *syncBuffer) Close() error { + b.closed = true + b.signal() + return nil +} + +func testSync(t *testing.T, level int, input []byte, name string) { + if len(input) == 0 { + return + } + + t.Logf("--testSync %d, %d, %s", level, len(input), name) + buf := newSyncBuffer() + buf1 := new(bytes.Buffer) + buf.WriteMode() + w, err := NewWriter(io.MultiWriter(buf, buf1), level) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + r := NewReader(buf) + + // Write half the input and read back. + for i := 0; i < 2; i++ { + var lo, hi int + if i == 0 { + lo, hi = 0, (len(input)+1)/2 + } else { + lo, hi = (len(input)+1)/2, len(input) + } + t.Logf("#%d: write %d-%d", i, lo, hi) + if _, err := w.Write(input[lo:hi]); err != nil { + t.Errorf("testSync: write: %v", err) + return + } + if i == 0 { + if err := w.Flush(); err != nil { + t.Errorf("testSync: flush: %v", err) + return + } + } else { + if err := w.Close(); err != nil { + t.Errorf("testSync: close: %v", err) + } + } + buf.ReadMode() + out := make([]byte, hi-lo+1) + m, err := io.ReadAtLeast(r, out, hi-lo) + t.Logf("#%d: read %d", i, m) + if m != hi-lo || err != nil { + t.Errorf("testSync/%d (%d, %d, %s): read %d: %d, %v (%d left)", i, level, len(input), name, hi-lo, m, err, buf.buf.Len()) + return + } + if !bytes.Equal(input[lo:hi], out[:hi-lo]) { + t.Errorf("testSync/%d: read wrong bytes: %x vs %x", i, input[lo:hi], out[:hi-lo]) + return + } + // This test originally checked that after reading + // the first half of the input, there was nothing left + // in the read buffer (buf.buf.Len() != 0) but that is + // not necessarily the case: the write Flush may emit + // some extra framing bits that are not necessary + // to process to obtain the first half of the uncompressed + // data. The test ran correctly most of the time, because + // the background goroutine had usually read even + // those extra bits by now, but it's not a useful thing to + // check. 
+ buf.WriteMode() + } + buf.ReadMode() + out := make([]byte, 10) + if n, err := r.Read(out); n > 0 || err != io.EOF { + t.Errorf("testSync (%d, %d, %s): final Read: %d, %v (hex: %x)", level, len(input), name, n, err, out[0:n]) + } + if buf.buf.Len() != 0 { + t.Errorf("testSync (%d, %d, %s): extra data at end", level, len(input), name) + } + r.Close() + + // stream should work for ordinary reader too + r = NewReader(buf1) + out, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("testSync: read: %s", err) + return + } + r.Close() + if !bytes.Equal(input, out) { + t.Errorf("testSync: decompress(compress(data)) != data: level=%d input=%s", level, name) + } +} + +func testToFromWithLevelAndLimit(t *testing.T, level int, input []byte, name string, limit int) { + var buffer bytes.Buffer + w, err := NewWriter(&buffer, level) + if err != nil { + t.Errorf("NewWriter: %v", err) + return + } + w.Write(input) + w.Close() + if limit > 0 && buffer.Len() > limit { + t.Errorf("level: %d, len(compress(data)) = %d > limit = %d", level, buffer.Len(), limit) + return + } + if limit > 0 { + t.Logf("level: %d - Size:%.2f%%, %d b\n", level, float64(buffer.Len()*100)/float64(limit), buffer.Len()) + } + r := NewReader(&buffer) + out, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("read: %s", err) + return + } + r.Close() + if !bytes.Equal(input, out) { + t.Errorf("decompress(compress(data)) != data: level=%d input=%s", level, name) + return + } + testSync(t, level, input, name) +} + +func testToFromWithLimit(t *testing.T, input []byte, name string, limit [11]int) { + for i := 0; i < 10; i++ { + testToFromWithLevelAndLimit(t, i, input, name, limit[i]) + } + testToFromWithLevelAndLimit(t, -2, input, name, limit[10]) +} + +func TestDeflateInflate(t *testing.T) { + for i, h := range deflateInflateTests { + testToFromWithLimit(t, h.in, fmt.Sprintf("#%d", i), [11]int{}) + } +} + +func TestReverseBits(t *testing.T) { + for _, h := range reverseBitsTests { + if v := reverseBits(h.in, h.bitCount); v != h.out { + t.Errorf("reverseBits(%v,%v) = %v, want %v", + h.in, h.bitCount, v, h.out) + } + } +} + +type deflateInflateStringTest struct { + filename string + label string + limit [11]int // Number 11 is ConstantCompression +} + +var deflateInflateStringTests = []deflateInflateStringTest{ + { + "../testdata/e.txt", + "2.718281828...", + [...]int{100018, 67900, 50960, 51150, 50930, 50790, 50790, 50790, 50790, 50790, 43683 + 100}, + }, + { + "../testdata/Mark.Twain-Tom.Sawyer.txt", + "Mark.Twain-Tom.Sawyer", + [...]int{387999, 185000, 182361, 179974, 174124, 168819, 162936, 160506, 160295, 160295, 233460 + 100}, + }, +} + +func TestDeflateInflateString(t *testing.T) { + for _, test := range deflateInflateStringTests { + gold, err := ioutil.ReadFile(test.filename) + if err != nil { + t.Error(err) + } + // Remove returns that may be present on Windows + neutral := strings.Map(func(r rune) rune { + if r != '\r' { + return r + } + return -1 + }, string(gold)) + + testToFromWithLimit(t, []byte(neutral), test.label, test.limit) + + if testing.Short() { + break + } + } +} + +func TestReaderDict(t *testing.T) { + const ( + dict = "hello world" + text = "hello again world" + ) + var b bytes.Buffer + w, err := NewWriter(&b, 5) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + w.Write([]byte(dict)) + w.Flush() + b.Reset() + w.Write([]byte(text)) + w.Close() + + r := NewReaderDict(&b, []byte(dict)) + data, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if string(data) != "hello again world" { + 
t.Fatalf("read returned %q want %q", string(data), text) + } +} + +func TestWriterDict(t *testing.T) { + const ( + dict = "hello world Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua." + text = "hello world Lorem ipsum dolor sit amet" + ) + // This test is sensitive to algorithm changes that skip + // data in favour of speed. Higher levels are less prone to this + // so we test level 4-9. + for l := 4; l < 9; l++ { + var b bytes.Buffer + w, err := NewWriter(&b, l) + if err != nil { + t.Fatalf("level %d, NewWriter: %v", l, err) + } + w.Write([]byte(dict)) + w.Flush() + b.Reset() + w.Write([]byte(text)) + w.Close() + + var b1 bytes.Buffer + w, _ = NewWriterDict(&b1, l, []byte(dict)) + w.Write([]byte(text)) + w.Close() + + if !bytes.Equal(b1.Bytes(), b.Bytes()) { + t.Errorf("level %d, writer wrote\n%v\n want\n%v", l, b1.Bytes(), b.Bytes()) + } + } +} + +// See http://code.google.com/p/go/issues/detail?id=2508 +func TestRegression2508(t *testing.T) { + if testing.Short() { + t.Logf("test disabled with -short") + return + } + w, err := NewWriter(ioutil.Discard, 1) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + buf := make([]byte, 1024) + for i := 0; i < 131072; i++ { + if _, err := w.Write(buf); err != nil { + t.Fatalf("writer failed: %v", err) + } + } + w.Close() +} + +func TestWriterReset(t *testing.T) { + for level := -2; level <= 9; level++ { + if level == -1 { + level++ + } + if testing.Short() && level > 1 { + break + } + w, err := NewWriter(ioutil.Discard, level) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + buf := []byte("hello world") + for i := 0; i < 1024; i++ { + w.Write(buf) + } + w.Reset(ioutil.Discard) + + wref, err := NewWriter(ioutil.Discard, level) + if err != nil { + t.Fatalf("NewWriter: %v", err) + } + + // DeepEqual doesn't compare functions. + w.d.fill, wref.d.fill = nil, nil + w.d.step, wref.d.step = nil, nil + w.d.bulkHasher, wref.d.bulkHasher = nil, nil + w.d.snap, wref.d.snap = nil, nil + + // hashMatch is always overwritten when used. + copy(w.d.hashMatch[:], wref.d.hashMatch[:]) + if w.d.tokens.n != 0 { + t.Errorf("level %d Writer not reset after Reset. %d tokens were present", level, w.d.tokens.n) + } + // As long as the length is 0, we don't care about the content. 
+		w.d.tokens = wref.d.tokens
+
+		// We don't care if there are values in the window, as long as d.index is 0.
+		w.d.window = wref.d.window
+		if !reflect.DeepEqual(w, wref) {
+			t.Errorf("level %d Writer not reset after Reset", level)
+		}
+	}
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, NoCompression) })
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, DefaultCompression) })
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, BestCompression) })
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriter(w, ConstantCompression) })
+	dict := []byte("we are the world")
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, NoCompression, dict) })
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, DefaultCompression, dict) })
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, BestCompression, dict) })
+	testResetOutput(t, func(w io.Writer) (*Writer, error) { return NewWriterDict(w, ConstantCompression, dict) })
+}
+
+func testResetOutput(t *testing.T, newWriter func(w io.Writer) (*Writer, error)) {
+	buf := new(bytes.Buffer)
+	w, err := newWriter(buf)
+	if err != nil {
+		t.Fatalf("NewWriter: %v", err)
+	}
+	b := []byte("hello world")
+	for i := 0; i < 1024; i++ {
+		w.Write(b)
+	}
+	w.Close()
+	out1 := buf.Bytes()
+
+	buf2 := new(bytes.Buffer)
+	w.Reset(buf2)
+	for i := 0; i < 1024; i++ {
+		w.Write(b)
+	}
+	w.Close()
+	out2 := buf2.Bytes()
+
+	if len(out1) != len(out2) {
+		t.Errorf("got %d, expected %d bytes", len(out2), len(out1))
+	}
+	if bytes.Compare(out1, out2) != 0 {
+		mm := 0
+		for i, b := range out1[:len(out2)] {
+			if b != out2[i] {
+				t.Errorf("mismatch index %d: %02x, expected %02x", i, out2[i], b)
+			}
+			mm++
+			if mm == 10 {
+				t.Fatal("Stopping")
+			}
+		}
+	}
+	t.Logf("got %d bytes", len(out1))
+}
+
+// TestBestSpeed tests that round-tripping through deflate and then inflate
+// recovers the original input. The Write sizes are near the thresholds in the
+// compressor.encSpeed method (0, 16, 128), as well as near maxStoreBlockSize
+// (65535).
+func TestBestSpeed(t *testing.T) {
+	abc := make([]byte, 128)
+	for i := range abc {
+		abc[i] = byte(i)
+	}
+	abcabc := bytes.Repeat(abc, 131072/len(abc))
+	var want []byte
+
+	testCases := [][]int{
+		{65536, 0},
+		{65536, 1},
+		{65536, 1, 256},
+		{65536, 1, 65536},
+		{65536, 14},
+		{65536, 15},
+		{65536, 16},
+		{65536, 16, 256},
+		{65536, 16, 65536},
+		{65536, 127},
+		{65536, 128},
+		{65536, 128, 256},
+		{65536, 128, 65536},
+		{65536, 129},
+		{65536, 65536, 256},
+		{65536, 65536, 65536},
+	}
+
+	for i, tc := range testCases {
+		for _, firstN := range []int{1, 65534, 65535, 65536, 65537, 131072} {
+			tc[0] = firstN
+		outer:
+			for _, flush := range []bool{false, true} {
+				buf := new(bytes.Buffer)
+				want = want[:0]
+
+				w, err := NewWriter(buf, BestSpeed)
+				if err != nil {
+					t.Errorf("i=%d, firstN=%d, flush=%t: NewWriter: %v", i, firstN, flush, err)
+					continue
+				}
+				for _, n := range tc {
+					want = append(want, abcabc[:n]...)
+ if _, err := w.Write(abcabc[:n]); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Write: %v", i, firstN, flush, err) + continue outer + } + if !flush { + continue + } + if err := w.Flush(); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Flush: %v", i, firstN, flush, err) + continue outer + } + } + if err := w.Close(); err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: Close: %v", i, firstN, flush, err) + continue + } + + r := NewReader(buf) + got, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("i=%d, firstN=%d, flush=%t: ReadAll: %v", i, firstN, flush, err) + continue + } + r.Close() + + if !bytes.Equal(got, want) { + t.Errorf("i=%d, firstN=%d, flush=%t: corruption during deflate-then-inflate", i, firstN, flush) + continue + } + } + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go new file mode 100644 index 0000000000..9275cff791 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder_test.go @@ -0,0 +1,139 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "strings" + "testing" +) + +func TestDictDecoder(t *testing.T) { + const ( + abc = "ABC\n" + fox = "The quick brown fox jumped over the lazy dog!\n" + poem = "The Road Not Taken\nRobert Frost\n" + + "\n" + + "Two roads diverged in a yellow wood,\n" + + "And sorry I could not travel both\n" + + "And be one traveler, long I stood\n" + + "And looked down one as far as I could\n" + + "To where it bent in the undergrowth;\n" + + "\n" + + "Then took the other, as just as fair,\n" + + "And having perhaps the better claim,\n" + + "Because it was grassy and wanted wear;\n" + + "Though as for that the passing there\n" + + "Had worn them really about the same,\n" + + "\n" + + "And both that morning equally lay\n" + + "In leaves no step had trodden black.\n" + + "Oh, I kept the first for another day!\n" + + "Yet knowing how way leads on to way,\n" + + "I doubted if I should ever come back.\n" + + "\n" + + "I shall be telling this with a sigh\n" + + "Somewhere ages and ages hence:\n" + + "Two roads diverged in a wood, and I-\n" + + "I took the one less traveled by,\n" + + "And that has made all the difference.\n" + ) + + var poemRefs = []struct { + dist int // Backward distance (0 if this is an insertion) + length int // Length of copy or insertion + }{ + {0, 38}, {33, 3}, {0, 48}, {79, 3}, {0, 11}, {34, 5}, {0, 6}, {23, 7}, + {0, 8}, {50, 3}, {0, 2}, {69, 3}, {34, 5}, {0, 4}, {97, 3}, {0, 4}, + {43, 5}, {0, 6}, {7, 4}, {88, 7}, {0, 12}, {80, 3}, {0, 2}, {141, 4}, + {0, 1}, {196, 3}, {0, 3}, {157, 3}, {0, 6}, {181, 3}, {0, 2}, {23, 3}, + {77, 3}, {28, 5}, {128, 3}, {110, 4}, {70, 3}, {0, 4}, {85, 6}, {0, 2}, + {182, 6}, {0, 4}, {133, 3}, {0, 7}, {47, 5}, {0, 20}, {112, 5}, {0, 1}, + {58, 3}, {0, 8}, {59, 3}, {0, 4}, {173, 3}, {0, 5}, {114, 3}, {0, 4}, + {92, 5}, {0, 2}, {71, 3}, {0, 2}, {76, 5}, {0, 1}, {46, 3}, {96, 4}, + {130, 4}, {0, 3}, {360, 3}, {0, 3}, {178, 5}, {0, 7}, {75, 3}, {0, 3}, + {45, 6}, {0, 6}, {299, 6}, {180, 3}, {70, 6}, {0, 1}, {48, 3}, {66, 4}, + {0, 3}, {47, 5}, {0, 9}, {325, 3}, {0, 1}, {359, 3}, {318, 3}, {0, 2}, + {199, 3}, {0, 1}, {344, 3}, {0, 3}, {248, 3}, {0, 10}, {310, 3}, {0, 3}, + {93, 6}, {0, 3}, {252, 3}, {157, 4}, {0, 2}, {273, 5}, {0, 14}, {99, 4}, + {0, 1}, {464, 4}, {0, 2}, {92, 4}, {495, 3}, {0, 1}, {322, 4}, {16, 4}, + 
{0, 3}, {402, 3}, {0, 2}, {237, 4}, {0, 2}, {432, 4}, {0, 1}, {483, 5}, + {0, 2}, {294, 4}, {0, 2}, {306, 3}, {113, 5}, {0, 1}, {26, 4}, {164, 3}, + {488, 4}, {0, 1}, {542, 3}, {248, 6}, {0, 5}, {205, 3}, {0, 8}, {48, 3}, + {449, 6}, {0, 2}, {192, 3}, {328, 4}, {9, 5}, {433, 3}, {0, 3}, {622, 25}, + {615, 5}, {46, 5}, {0, 2}, {104, 3}, {475, 10}, {549, 3}, {0, 4}, {597, 8}, + {314, 3}, {0, 1}, {473, 6}, {317, 5}, {0, 1}, {400, 3}, {0, 3}, {109, 3}, + {151, 3}, {48, 4}, {0, 4}, {125, 3}, {108, 3}, {0, 2}, + } + + var got, want bytes.Buffer + var dd dictDecoder + dd.init(1<<11, nil) + + var writeCopy = func(dist, length int) { + for length > 0 { + cnt := dd.tryWriteCopy(dist, length) + if cnt == 0 { + cnt = dd.writeCopy(dist, length) + } + + length -= cnt + if dd.availWrite() == 0 { + got.Write(dd.readFlush()) + } + } + } + var writeString = func(str string) { + for len(str) > 0 { + cnt := copy(dd.writeSlice(), str) + str = str[cnt:] + dd.writeMark(cnt) + if dd.availWrite() == 0 { + got.Write(dd.readFlush()) + } + } + } + + writeString(".") + want.WriteByte('.') + + str := poem + for _, ref := range poemRefs { + if ref.dist == 0 { + writeString(str[:ref.length]) + } else { + writeCopy(ref.dist, ref.length) + } + str = str[ref.length:] + } + want.WriteString(poem) + + writeCopy(dd.histSize(), 33) + want.Write(want.Bytes()[:33]) + + writeString(abc) + writeCopy(len(abc), 59*len(abc)) + want.WriteString(strings.Repeat(abc, 60)) + + writeString(fox) + writeCopy(len(fox), 9*len(fox)) + want.WriteString(strings.Repeat(fox, 10)) + + writeString(".") + writeCopy(1, 9) + want.WriteString(strings.Repeat(".", 10)) + + writeString(strings.ToUpper(poem)) + writeCopy(len(poem), 7*len(poem)) + want.WriteString(strings.Repeat(strings.ToUpper(poem), 8)) + + writeCopy(dd.histSize(), 10) + want.Write(want.Bytes()[want.Len()-dd.histSize():][:10]) + + got.Write(dd.readFlush()) + if got.String() != want.String() { + t.Errorf("final string mismatch:\ngot %q\nwant %q", got.String(), want.String()) + } +} diff --git a/vendor/github.com/klauspost/compress/flate/flate_test.go b/vendor/github.com/klauspost/compress/flate/flate_test.go new file mode 100644 index 0000000000..3f67025cd7 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/flate_test.go @@ -0,0 +1,260 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This test tests some internals of the flate package. +// The tests in package compress/gzip serve as the +// end-to-end test of the decompressor. + +package flate + +import ( + "bytes" + "encoding/hex" + "io/ioutil" + "testing" +) + +// The following test should not panic. +func TestIssue5915(t *testing.T) { + bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, 5, 5, 6, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 6, 0, 11, 0, 8, 0, 6, 6, 10, 8} + var h huffmanDecoder + if h.init(bits) { + t.Fatalf("Given sequence of bits is bad, and should not succeed.") + } +} + +// The following test should not panic. +func TestIssue5962(t *testing.T) { + bits := []int{4, 0, 0, 6, 4, 3, 2, 3, 3, 4, 4, 5, 0, 0, 0, 0, + 5, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11} + var h huffmanDecoder + if h.init(bits) { + t.Fatalf("Given sequence of bits is bad, and should not succeed.") + } +} + +// The following test should not panic. 
+func TestIssue6255(t *testing.T) { + bits1 := []int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 11} + bits2 := []int{11, 13} + var h huffmanDecoder + if !h.init(bits1) { + t.Fatalf("Given sequence of bits is good and should succeed.") + } + if h.init(bits2) { + t.Fatalf("Given sequence of bits is bad and should not succeed.") + } +} + +func TestInvalidEncoding(t *testing.T) { + // Initialize Huffman decoder to recognize "0". + var h huffmanDecoder + if !h.init([]int{1}) { + t.Fatal("Failed to initialize Huffman decoder") + } + + // Initialize decompressor with invalid Huffman coding. + var f decompressor + f.r = bytes.NewReader([]byte{0xff}) + + _, err := f.huffSym(&h) + if err == nil { + t.Fatal("Should have rejected invalid bit sequence") + } +} + +func TestInvalidBits(t *testing.T) { + oversubscribed := []int{1, 2, 3, 4, 4, 5} + incomplete := []int{1, 2, 4, 4} + var h huffmanDecoder + if h.init(oversubscribed) { + t.Fatal("Should reject oversubscribed bit-length set") + } + if h.init(incomplete) { + t.Fatal("Should reject incomplete bit-length set") + } +} + +func TestStreams(t *testing.T) { + // To verify any of these hexstrings as valid or invalid flate streams + // according to the C zlib library, you can use the Python wrapper library: + // >>> hex_string = "010100feff11" + // >>> import zlib + // >>> zlib.decompress(hex_string.decode("hex"), -15) # Negative means raw DEFLATE + // '\x11' + + testCases := []struct { + desc string // Description of the stream + stream string // Hexstring of the input DEFLATE stream + want string // Expected result. Use "fail" to expect failure + }{{ + "degenerate HCLenTree", + "05e0010000000000100000000000000000000000000000000000000000000000" + + "00000000000000000004", + "fail", + }, { + "complete HCLenTree, empty HLitTree, empty HDistTree", + "05e0010400000000000000000000000000000000000000000000000000000000" + + "00000000000000000010", + "fail", + }, { + "empty HCLenTree", + "05e0010000000000000000000000000000000000000000000000000000000000" + + "00000000000000000010", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, use missing HDist symbol", + "000100feff000de0010400000000100000000000000000000000000000000000" + + "0000000000000000000000000000002c", + "fail", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use missing HDist symbol", + "000100feff000de0010000000000000000000000000000000000000000000000" + + "00000000000000000610000000004070", + "fail", + }, { + "complete HCLenTree, empty HLitTree, empty HDistTree", + "05e0010400000000100400000000000000000000000000000000000000000000" + + "0000000000000000000000000008", + "fail", + }, { + "complete HCLenTree, empty HLitTree, degenerate HDistTree", + "05e0010400000000100400000000000000000000000000000000000000000000" + + "0000000000000000000800000008", + "fail", + }, { + "complete HCLenTree, degenerate HLitTree, degenerate HDistTree, use missing HLit symbol", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000001c", + "fail", + }, { + "complete HCLenTree, complete HLitTree, too large HDistTree", + "edff870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000080000000000000004", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, excessive repeater code", + "edfd870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000e8b100", + "fail", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree of normal length 30", + 
"05fd01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07000000fe01", + "", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree of excessive length 31", + "05fe01240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07000000fc03", + "fail", + }, { + "complete HCLenTree, over-subscribed HLitTree, empty HDistTree", + "05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07f00f", + "fail", + }, { + "complete HCLenTree, under-subscribed HLitTree, empty HDistTree", + "05e001240000000000fcffffffffffffffffffffffffffffffffffffffffffff" + + "fffffffffcffffffff07f00f", + "fail", + }, { + "complete HCLenTree, complete HLitTree with single code, empty HDistTree", + "05e001240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07f00f", + "01", + }, { + "complete HCLenTree, complete HLitTree with multiple codes, empty HDistTree", + "05e301240000000000f8ffffffffffffffffffffffffffffffffffffffffffff" + + "ffffffffffffffffff07807f", + "01", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HDist symbol", + "000100feff000de0010400000000100000000000000000000000000000000000" + + "0000000000000000000000000000003c", + "00000000", + }, { + "complete HCLenTree, degenerate HLitTree, degenerate HDistTree", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000000c", + "", + }, { + "complete HCLenTree, degenerate HLitTree, empty HDistTree", + "05e0010400000000100000000000000000000000000000000000000000000000" + + "00000000000000000004", + "", + }, { + "complete HCLenTree, complete HLitTree, empty HDistTree, spanning repeater code", + "edfd870500000000200400000000000000000000000000000000000000000000" + + "000000000000000000e8b000", + "", + }, { + "complete HCLenTree with length codes, complete HLitTree, empty HDistTree", + "ede0010400000000100000000000000000000000000000000000000000000000" + + "0000000000000000000400004000", + "", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit symbol 284 with count 31", + "000100feff00ede0010400000000100000000000000000000000000000000000" + + "000000000000000000000000000000040000407f00", + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "0000000000000000000000000000000000000000000000000000000000000000" + + "000000", + }, { + "complete HCLenTree, complete HLitTree, degenerate HDistTree, use valid HLit and HDist symbols", + "0cc2010d00000082b0ac4aff0eb07d27060000ffff", + "616263616263", + }, { + "fixed block, use reserved symbol 287", + "33180700", + "fail", + }, { + "raw block", + "010100feff11", + "11", + }, { + "issue 10426 - over-subscribed HCLenTree causes a hang", + "344c4a4e494d4b070000ff2e2eff2e2e2e2e2eff", + "fail", + }, { + "issue 11030 - empty HDistTree unexpectedly leads to error", + "05c0070600000080400fff37a0ca", + "", + }, { + "issue 11033 - empty HDistTree unexpectedly leads to error", + "050fb109c020cca5d017dcbca044881ee1034ec149c8980bbc413c2ab35be9dc" + + 
"b1473449922449922411202306ee97b0383a521b4ffdcf3217f9f7d3adb701", + "3130303634342068652e706870005d05355f7ed957ff084a90925d19e3ebc6d0" + + "c6d7", + }} + + for i, tc := range testCases { + data, err := hex.DecodeString(tc.stream) + if err != nil { + t.Fatal(err) + } + data, err = ioutil.ReadAll(NewReader(bytes.NewReader(data))) + if tc.want == "fail" { + if err == nil { + t.Errorf("#%d (%s): got nil error, want non-nil", i, tc.desc) + } + } else { + if err != nil { + t.Errorf("#%d (%s): %v", i, tc.desc, err) + continue + } + if got := hex.EncodeToString(data); got != tc.want { + t.Errorf("#%d (%s):\ngot %q\nwant %q", i, tc.desc, got, tc.want) + } + + } + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go new file mode 100644 index 0000000000..882d3abec1 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer_test.go @@ -0,0 +1,366 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "flag" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var update = flag.Bool("update", false, "update reference files") + +// TestBlockHuff tests huffman encoding against reference files +// to detect possible regressions. +// If encoding/bit allocation changes you can regenerate these files +// by using the -update flag. +func TestBlockHuff(t *testing.T) { + // determine input files + match, err := filepath.Glob("testdata/huffman-*.in") + if err != nil { + t.Fatal(err) + } + + for _, in := range match { + out := in // for files where input and output are identical + if strings.HasSuffix(in, ".in") { + out = in[:len(in)-len(".in")] + ".golden" + } + testBlockHuff(t, in, out) + } +} + +func testBlockHuff(t *testing.T, in, out string) { + all, err := ioutil.ReadFile(in) + if err != nil { + t.Error(err) + return + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + bw.writeBlockHuff(false, all) + bw.flush() + got := buf.Bytes() + + want, err := ioutil.ReadFile(out) + if err != nil && !*update { + t.Error(err) + return + } + + t.Logf("Testing %q", in) + if !bytes.Equal(got, want) { + if *update { + if in != out { + t.Logf("Updating %q", out) + if err := ioutil.WriteFile(out, got, 0666); err != nil { + t.Error(err) + } + return + } + // in == out: don't accidentally destroy input + t.Errorf("WARNING: -update did not rewrite input file %s", in) + } + + t.Errorf("%q != %q (see %q)", in, out, in+".got") + if err := ioutil.WriteFile(in+".got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Output ok") + + // Test if the writer produces the same output after reset. + buf.Reset() + bw.reset(&buf) + bw.writeBlockHuff(false, all) + bw.flush() + got = buf.Bytes() + if !bytes.Equal(got, want) { + t.Errorf("after reset %q != %q (see %q)", in, out, in+".reset.got") + if err := ioutil.WriteFile(in+".reset.got", got, 0666); err != nil { + t.Error(err) + } + return + } + t.Log("Reset ok") + testWriterEOF(t, "huff", huffTest{input: in}, true) +} + +type huffTest struct { + tokens []token + input string // File name of input data matching the tokens. + want string // File name of data with the expected output with input available. + wantNoInput string // File name of the expected output when no input is available. +} + +const ml = 0x7fc00000 // Maximum length token. 
Used to reduce the size of writeBlockTests + +var writeBlockTests = []huffTest{ + { + input: "testdata/huffman-null-max.in", + want: "testdata/huffman-null-max.%s.expect", + wantNoInput: "testdata/huffman-null-max.%s.expect-noinput", + tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x0, 0x0}, + }, + { + input: "testdata/huffman-pi.in", + want: "testdata/huffman-pi.%s.expect", + wantNoInput: "testdata/huffman-pi.%s.expect-noinput", + tokens: []token{0x33, 0x2e, 0x31, 0x34, 0x31, 0x35, 0x39, 0x32, 0x36, 0x35, 0x33, 0x35, 0x38, 0x39, 0x37, 0x39, 0x33, 0x32, 0x33, 0x38, 0x34, 0x36, 0x32, 0x36, 0x34, 0x33, 0x33, 0x38, 0x33, 0x32, 0x37, 0x39, 0x35, 0x30, 0x32, 0x38, 0x38, 0x34, 0x31, 0x39, 0x37, 0x31, 0x36, 0x39, 0x33, 0x39, 0x39, 0x33, 0x37, 0x35, 0x31, 0x30, 0x35, 0x38, 0x32, 0x30, 0x39, 0x37, 0x34, 0x39, 0x34, 0x34, 0x35, 0x39, 0x32, 0x33, 0x30, 0x37, 0x38, 0x31, 0x36, 0x34, 0x30, 0x36, 0x32, 0x38, 0x36, 0x32, 0x30, 0x38, 0x39, 0x39, 0x38, 0x36, 0x32, 0x38, 0x30, 0x33, 0x34, 0x38, 0x32, 0x35, 0x33, 0x34, 0x32, 0x31, 0x31, 0x37, 0x30, 0x36, 0x37, 0x39, 0x38, 0x32, 0x31, 0x34, 0x38, 0x30, 0x38, 0x36, 0x35, 0x31, 0x33, 0x32, 0x38, 0x32, 0x33, 0x30, 0x36, 0x36, 0x34, 0x37, 0x30, 0x39, 0x33, 0x38, 0x34, 0x34, 0x36, 0x30, 0x39, 0x35, 0x35, 0x30, 0x35, 0x38, 0x32, 0x32, 0x33, 0x31, 0x37, 0x32, 0x35, 0x33, 0x35, 0x39, 0x34, 0x30, 0x38, 0x31, 0x32, 0x38, 0x34, 0x38, 0x31, 0x31, 0x31, 0x37, 0x34, 0x4040007e, 0x34, 0x31, 0x30, 0x32, 0x37, 0x30, 0x31, 0x39, 0x33, 0x38, 0x35, 0x32, 0x31, 0x31, 0x30, 0x35, 0x35, 0x35, 0x39, 0x36, 0x34, 0x34, 0x36, 0x32, 0x32, 0x39, 0x34, 0x38, 0x39, 0x35, 0x34, 0x39, 0x33, 0x30, 0x33, 0x38, 0x31, 0x40400012, 0x32, 0x38, 0x38, 0x31, 0x30, 0x39, 0x37, 0x35, 0x36, 0x36, 0x35, 0x39, 0x33, 0x33, 0x34, 0x34, 0x36, 0x40400047, 0x37, 0x35, 0x36, 0x34, 0x38, 0x32, 0x33, 0x33, 0x37, 0x38, 0x36, 0x37, 0x38, 0x33, 0x31, 0x36, 0x35, 0x32, 0x37, 0x31, 0x32, 0x30, 0x31, 0x39, 0x30, 0x39, 0x31, 0x34, 0x4040001a, 0x35, 0x36, 0x36, 0x39, 0x32, 0x33, 0x34, 0x36, 0x404000b2, 0x36, 0x31, 0x30, 0x34, 0x35, 0x34, 0x33, 0x32, 0x36, 0x40400032, 0x31, 0x33, 0x33, 0x39, 0x33, 0x36, 0x30, 0x37, 0x32, 0x36, 0x30, 0x32, 0x34, 0x39, 0x31, 0x34, 0x31, 0x32, 0x37, 0x33, 0x37, 0x32, 0x34, 0x35, 0x38, 0x37, 0x30, 0x30, 0x36, 0x36, 0x30, 0x36, 0x33, 0x31, 0x35, 0x35, 0x38, 0x38, 0x31, 0x37, 0x34, 0x38, 0x38, 0x31, 0x35, 0x32, 0x30, 0x39, 0x32, 0x30, 0x39, 0x36, 0x32, 0x38, 0x32, 0x39, 0x32, 0x35, 0x34, 0x30, 0x39, 0x31, 0x37, 0x31, 0x35, 0x33, 0x36, 0x34, 0x33, 0x36, 0x37, 0x38, 0x39, 0x32, 0x35, 0x39, 0x30, 0x33, 0x36, 0x30, 0x30, 0x31, 
0x31, 0x33, 0x33, 0x30, 0x35, 0x33, 0x30, 0x35, 0x34, 0x38, 0x38, 0x32, 0x30, 0x34, 0x36, 0x36, 0x35, 0x32, 0x31, 0x33, 0x38, 0x34, 0x31, 0x34, 0x36, 0x39, 0x35, 0x31, 0x39, 0x34, 0x31, 0x35, 0x31, 0x31, 0x36, 0x30, 0x39, 0x34, 0x33, 0x33, 0x30, 0x35, 0x37, 0x32, 0x37, 0x30, 0x33, 0x36, 0x35, 0x37, 0x35, 0x39, 0x35, 0x39, 0x31, 0x39, 0x35, 0x33, 0x30, 0x39, 0x32, 0x31, 0x38, 0x36, 0x31, 0x31, 0x37, 0x404000e9, 0x33, 0x32, 0x40400009, 0x39, 0x33, 0x31, 0x30, 0x35, 0x31, 0x31, 0x38, 0x35, 0x34, 0x38, 0x30, 0x37, 0x4040010e, 0x33, 0x37, 0x39, 0x39, 0x36, 0x32, 0x37, 0x34, 0x39, 0x35, 0x36, 0x37, 0x33, 0x35, 0x31, 0x38, 0x38, 0x35, 0x37, 0x35, 0x32, 0x37, 0x32, 0x34, 0x38, 0x39, 0x31, 0x32, 0x32, 0x37, 0x39, 0x33, 0x38, 0x31, 0x38, 0x33, 0x30, 0x31, 0x31, 0x39, 0x34, 0x39, 0x31, 0x32, 0x39, 0x38, 0x33, 0x33, 0x36, 0x37, 0x33, 0x33, 0x36, 0x32, 0x34, 0x34, 0x30, 0x36, 0x35, 0x36, 0x36, 0x34, 0x33, 0x30, 0x38, 0x36, 0x30, 0x32, 0x31, 0x33, 0x39, 0x34, 0x39, 0x34, 0x36, 0x33, 0x39, 0x35, 0x32, 0x32, 0x34, 0x37, 0x33, 0x37, 0x31, 0x39, 0x30, 0x37, 0x30, 0x32, 0x31, 0x37, 0x39, 0x38, 0x40800099, 0x37, 0x30, 0x32, 0x37, 0x37, 0x30, 0x35, 0x33, 0x39, 0x32, 0x31, 0x37, 0x31, 0x37, 0x36, 0x32, 0x39, 0x33, 0x31, 0x37, 0x36, 0x37, 0x35, 0x40800232, 0x37, 0x34, 0x38, 0x31, 0x40400006, 0x36, 0x36, 0x39, 0x34, 0x30, 0x404001e7, 0x30, 0x30, 0x30, 0x35, 0x36, 0x38, 0x31, 0x32, 0x37, 0x31, 0x34, 0x35, 0x32, 0x36, 0x33, 0x35, 0x36, 0x30, 0x38, 0x32, 0x37, 0x37, 0x38, 0x35, 0x37, 0x37, 0x31, 0x33, 0x34, 0x32, 0x37, 0x35, 0x37, 0x37, 0x38, 0x39, 0x36, 0x40400129, 0x33, 0x36, 0x33, 0x37, 0x31, 0x37, 0x38, 0x37, 0x32, 0x31, 0x34, 0x36, 0x38, 0x34, 0x34, 0x30, 0x39, 0x30, 0x31, 0x32, 0x32, 0x34, 0x39, 0x35, 0x33, 0x34, 0x33, 0x30, 0x31, 0x34, 0x36, 0x35, 0x34, 0x39, 0x35, 0x38, 0x35, 0x33, 0x37, 0x31, 0x30, 0x35, 0x30, 0x37, 0x39, 0x404000ca, 0x36, 0x40400153, 0x38, 0x39, 0x32, 0x33, 0x35, 0x34, 0x404001c9, 0x39, 0x35, 0x36, 0x31, 0x31, 0x32, 0x31, 0x32, 0x39, 0x30, 0x32, 0x31, 0x39, 0x36, 0x30, 0x38, 0x36, 0x34, 0x30, 0x33, 0x34, 0x34, 0x31, 0x38, 0x31, 0x35, 0x39, 0x38, 0x31, 0x33, 0x36, 0x32, 0x39, 0x37, 0x37, 0x34, 0x40400074, 0x30, 0x39, 0x39, 0x36, 0x30, 0x35, 0x31, 0x38, 0x37, 0x30, 0x37, 0x32, 0x31, 0x31, 0x33, 0x34, 0x39, 0x40800000, 0x38, 0x33, 0x37, 0x32, 0x39, 0x37, 0x38, 0x30, 0x34, 0x39, 0x39, 0x404002da, 0x39, 0x37, 0x33, 0x31, 0x37, 0x33, 0x32, 0x38, 0x4040018a, 0x36, 0x33, 0x31, 0x38, 0x35, 0x40400301, 0x404002e8, 0x34, 0x35, 0x35, 0x33, 0x34, 0x36, 0x39, 0x30, 0x38, 0x33, 0x30, 0x32, 0x36, 0x34, 0x32, 0x35, 0x32, 0x32, 0x33, 0x30, 0x404002e3, 0x40400267, 0x38, 0x35, 0x30, 0x33, 0x35, 0x32, 0x36, 0x31, 0x39, 0x33, 0x31, 0x31, 0x40400212, 0x31, 0x30, 0x31, 0x30, 0x30, 0x30, 0x33, 0x31, 0x33, 0x37, 0x38, 0x33, 0x38, 0x37, 0x35, 0x32, 0x38, 0x38, 0x36, 0x35, 0x38, 0x37, 0x35, 0x33, 0x33, 0x32, 0x30, 0x38, 0x33, 0x38, 0x31, 0x34, 0x32, 0x30, 0x36, 0x40400140, 0x4040012b, 0x31, 0x34, 0x37, 0x33, 0x30, 0x33, 0x35, 0x39, 0x4080032e, 0x39, 0x30, 0x34, 0x32, 0x38, 0x37, 0x35, 0x35, 0x34, 0x36, 0x38, 0x37, 0x33, 0x31, 0x31, 0x35, 0x39, 0x35, 0x40400355, 0x33, 0x38, 0x38, 0x32, 0x33, 0x35, 0x33, 0x37, 0x38, 0x37, 0x35, 0x4080037f, 0x39, 0x4040013a, 0x31, 0x40400148, 0x38, 0x30, 0x35, 0x33, 0x4040018a, 0x32, 0x32, 0x36, 0x38, 0x30, 0x36, 0x36, 0x31, 0x33, 0x30, 0x30, 0x31, 0x39, 0x32, 0x37, 0x38, 0x37, 0x36, 0x36, 0x31, 0x31, 0x31, 0x39, 0x35, 0x39, 0x40400237, 0x36, 0x40800124, 0x38, 0x39, 0x33, 0x38, 0x30, 0x39, 0x35, 0x32, 0x35, 0x37, 0x32, 0x30, 0x31, 0x30, 0x36, 0x35, 0x34, 0x38, 0x35, 0x38, 0x36, 0x33, 
0x32, 0x37, 0x4040009a, 0x39, 0x33, 0x36, 0x31, 0x35, 0x33, 0x40400220, 0x4080015c, 0x32, 0x33, 0x30, 0x33, 0x30, 0x31, 0x39, 0x35, 0x32, 0x30, 0x33, 0x35, 0x33, 0x30, 0x31, 0x38, 0x35, 0x32, 0x40400171, 0x40400075, 0x33, 0x36, 0x32, 0x32, 0x35, 0x39, 0x39, 0x34, 0x31, 0x33, 0x40400254, 0x34, 0x39, 0x37, 0x32, 0x31, 0x37, 0x404000de, 0x33, 0x34, 0x37, 0x39, 0x31, 0x33, 0x31, 0x35, 0x31, 0x35, 0x35, 0x37, 0x34, 0x38, 0x35, 0x37, 0x32, 0x34, 0x32, 0x34, 0x35, 0x34, 0x31, 0x35, 0x30, 0x36, 0x39, 0x4040013f, 0x38, 0x32, 0x39, 0x35, 0x33, 0x33, 0x31, 0x31, 0x36, 0x38, 0x36, 0x31, 0x37, 0x32, 0x37, 0x38, 0x40400337, 0x39, 0x30, 0x37, 0x35, 0x30, 0x39, 0x4040010d, 0x37, 0x35, 0x34, 0x36, 0x33, 0x37, 0x34, 0x36, 0x34, 0x39, 0x33, 0x39, 0x33, 0x31, 0x39, 0x32, 0x35, 0x35, 0x30, 0x36, 0x30, 0x34, 0x30, 0x30, 0x39, 0x4040026b, 0x31, 0x36, 0x37, 0x31, 0x31, 0x33, 0x39, 0x30, 0x30, 0x39, 0x38, 0x40400335, 0x34, 0x30, 0x31, 0x32, 0x38, 0x35, 0x38, 0x33, 0x36, 0x31, 0x36, 0x30, 0x33, 0x35, 0x36, 0x33, 0x37, 0x30, 0x37, 0x36, 0x36, 0x30, 0x31, 0x30, 0x34, 0x40400172, 0x38, 0x31, 0x39, 0x34, 0x32, 0x39, 0x4080041e, 0x404000ef, 0x4040028b, 0x37, 0x38, 0x33, 0x37, 0x34, 0x404004a8, 0x38, 0x32, 0x35, 0x35, 0x33, 0x37, 0x40800209, 0x32, 0x36, 0x38, 0x4040002e, 0x34, 0x30, 0x34, 0x37, 0x404001d1, 0x34, 0x404004b5, 0x4040038d, 0x38, 0x34, 0x404003a8, 0x36, 0x40c0031f, 0x33, 0x33, 0x31, 0x33, 0x36, 0x37, 0x37, 0x30, 0x32, 0x38, 0x39, 0x38, 0x39, 0x31, 0x35, 0x32, 0x40400062, 0x35, 0x32, 0x31, 0x36, 0x32, 0x30, 0x35, 0x36, 0x39, 0x36, 0x40400411, 0x30, 0x35, 0x38, 0x40400477, 0x35, 0x40400498, 0x35, 0x31, 0x31, 0x40400209, 0x38, 0x32, 0x34, 0x33, 0x30, 0x30, 0x33, 0x35, 0x35, 0x38, 0x37, 0x36, 0x34, 0x30, 0x32, 0x34, 0x37, 0x34, 0x39, 0x36, 0x34, 0x37, 0x33, 0x32, 0x36, 0x33, 0x4040043e, 0x39, 0x39, 0x32, 0x4040044b, 0x34, 0x32, 0x36, 0x39, 0x40c002c5, 0x37, 0x404001d6, 0x34, 0x4040053d, 0x4040041d, 0x39, 0x33, 0x34, 0x31, 0x37, 0x404001ad, 0x31, 0x32, 0x4040002a, 0x34, 0x4040019e, 0x31, 0x35, 0x30, 0x33, 0x30, 0x32, 0x38, 0x36, 0x31, 0x38, 0x32, 0x39, 0x37, 0x34, 0x35, 0x35, 0x35, 0x37, 0x30, 0x36, 0x37, 0x34, 0x40400135, 0x35, 0x30, 0x35, 0x34, 0x39, 0x34, 0x35, 0x38, 0x404001c5, 0x39, 0x40400051, 0x35, 0x36, 0x404001ec, 0x37, 0x32, 0x31, 0x30, 0x37, 0x39, 0x40400159, 0x33, 0x30, 0x4040010a, 0x33, 0x32, 0x31, 0x31, 0x36, 0x35, 0x33, 0x34, 0x34, 0x39, 0x38, 0x37, 0x32, 0x30, 0x32, 0x37, 0x4040011b, 0x30, 0x32, 0x33, 0x36, 0x34, 0x4040022e, 0x35, 0x34, 0x39, 0x39, 0x31, 0x31, 0x39, 0x38, 0x40400418, 0x34, 0x4040011b, 0x35, 0x33, 0x35, 0x36, 0x36, 0x33, 0x36, 0x39, 0x40400450, 0x32, 0x36, 0x35, 0x404002e4, 0x37, 0x38, 0x36, 0x32, 0x35, 0x35, 0x31, 0x404003da, 0x31, 0x37, 0x35, 0x37, 0x34, 0x36, 0x37, 0x32, 0x38, 0x39, 0x30, 0x39, 0x37, 0x37, 0x37, 0x37, 0x40800453, 0x30, 0x30, 0x30, 0x404005fd, 0x37, 0x30, 0x404004df, 0x36, 0x404003e9, 0x34, 0x39, 0x31, 0x4040041e, 0x40400297, 0x32, 0x31, 0x34, 0x37, 0x37, 0x32, 0x33, 0x35, 0x30, 0x31, 0x34, 0x31, 0x34, 0x40400643, 0x33, 0x35, 0x36, 0x404004af, 0x31, 0x36, 0x31, 0x33, 0x36, 0x31, 0x31, 0x35, 0x37, 0x33, 0x35, 0x32, 0x35, 0x40400504, 0x33, 0x34, 0x4040005b, 0x31, 0x38, 0x4040047b, 0x38, 0x34, 0x404005e7, 0x33, 0x33, 0x32, 0x33, 0x39, 0x30, 0x37, 0x33, 0x39, 0x34, 0x31, 0x34, 0x33, 0x33, 0x33, 0x34, 0x35, 0x34, 0x37, 0x37, 0x36, 0x32, 0x34, 0x40400242, 0x32, 0x35, 0x31, 0x38, 0x39, 0x38, 0x33, 0x35, 0x36, 0x39, 0x34, 0x38, 0x35, 0x35, 0x36, 0x32, 0x30, 0x39, 0x39, 0x32, 0x31, 0x39, 0x32, 0x32, 0x32, 0x31, 0x38, 0x34, 0x32, 0x37, 0x4040023e, 0x32, 0x404000ba, 0x36, 
0x38, 0x38, 0x37, 0x36, 0x37, 0x31, 0x37, 0x39, 0x30, 0x40400055, 0x30, 0x40800106, 0x36, 0x36, 0x404003e7, 0x38, 0x38, 0x36, 0x32, 0x37, 0x32, 0x404006dc, 0x31, 0x37, 0x38, 0x36, 0x30, 0x38, 0x35, 0x37, 0x40400073, 0x33, 0x408002fc, 0x37, 0x39, 0x37, 0x36, 0x36, 0x38, 0x31, 0x404002bd, 0x30, 0x30, 0x39, 0x35, 0x33, 0x38, 0x38, 0x40400638, 0x33, 0x404006a5, 0x30, 0x36, 0x38, 0x30, 0x30, 0x36, 0x34, 0x32, 0x32, 0x35, 0x31, 0x32, 0x35, 0x32, 0x4040057b, 0x37, 0x33, 0x39, 0x32, 0x40400297, 0x40400474, 0x34, 0x408006b3, 0x38, 0x36, 0x32, 0x36, 0x39, 0x34, 0x35, 0x404001e5, 0x34, 0x31, 0x39, 0x36, 0x35, 0x32, 0x38, 0x35, 0x30, 0x40400099, 0x4040039c, 0x31, 0x38, 0x36, 0x33, 0x404001be, 0x34, 0x40800154, 0x32, 0x30, 0x33, 0x39, 0x4040058b, 0x34, 0x35, 0x404002bc, 0x32, 0x33, 0x37, 0x4040042c, 0x36, 0x40400510, 0x35, 0x36, 0x40400638, 0x37, 0x31, 0x39, 0x31, 0x37, 0x32, 0x38, 0x40400171, 0x37, 0x36, 0x34, 0x36, 0x35, 0x37, 0x35, 0x37, 0x33, 0x39, 0x40400101, 0x33, 0x38, 0x39, 0x40400748, 0x38, 0x33, 0x32, 0x36, 0x34, 0x35, 0x39, 0x39, 0x35, 0x38, 0x404006a7, 0x30, 0x34, 0x37, 0x38, 0x404001de, 0x40400328, 0x39, 0x4040002d, 0x36, 0x34, 0x30, 0x37, 0x38, 0x39, 0x35, 0x31, 0x4040008e, 0x36, 0x38, 0x33, 0x4040012f, 0x32, 0x35, 0x39, 0x35, 0x37, 0x30, 0x40400468, 0x38, 0x32, 0x32, 0x404002c8, 0x32, 0x4040061b, 0x34, 0x30, 0x37, 0x37, 0x32, 0x36, 0x37, 0x31, 0x39, 0x34, 0x37, 0x38, 0x40400319, 0x38, 0x32, 0x36, 0x30, 0x31, 0x34, 0x37, 0x36, 0x39, 0x39, 0x30, 0x39, 0x404004e8, 0x30, 0x31, 0x33, 0x36, 0x33, 0x39, 0x34, 0x34, 0x33, 0x4040027f, 0x33, 0x30, 0x40400105, 0x32, 0x30, 0x33, 0x34, 0x39, 0x36, 0x32, 0x35, 0x32, 0x34, 0x35, 0x31, 0x37, 0x404003b5, 0x39, 0x36, 0x35, 0x31, 0x34, 0x33, 0x31, 0x34, 0x32, 0x39, 0x38, 0x30, 0x39, 0x31, 0x39, 0x30, 0x36, 0x35, 0x39, 0x32, 0x40400282, 0x37, 0x32, 0x32, 0x31, 0x36, 0x39, 0x36, 0x34, 0x36, 0x40400419, 0x4040007a, 0x35, 0x4040050e, 0x34, 0x40800565, 0x38, 0x40400559, 0x39, 0x37, 0x4040057b, 0x35, 0x34, 0x4040049d, 0x4040023e, 0x37, 0x4040065a, 0x38, 0x34, 0x36, 0x38, 0x31, 0x33, 0x4040008c, 0x36, 0x38, 0x33, 0x38, 0x36, 0x38, 0x39, 0x34, 0x32, 0x37, 0x37, 0x34, 0x31, 0x35, 0x35, 0x39, 0x39, 0x31, 0x38, 0x35, 0x4040005a, 0x32, 0x34, 0x35, 0x39, 0x35, 0x33, 0x39, 0x35, 0x39, 0x34, 0x33, 0x31, 0x404005b7, 0x37, 0x40400012, 0x36, 0x38, 0x30, 0x38, 0x34, 0x35, 0x404002e7, 0x37, 0x33, 0x4040081e, 0x39, 0x35, 0x38, 0x34, 0x38, 0x36, 0x35, 0x33, 0x38, 0x404006e8, 0x36, 0x32, 0x404000f2, 0x36, 0x30, 0x39, 0x404004b6, 0x36, 0x30, 0x38, 0x30, 0x35, 0x31, 0x32, 0x34, 0x33, 0x38, 0x38, 0x34, 0x4040013a, 0x4040000b, 0x34, 0x31, 0x33, 0x4040030f, 0x37, 0x36, 0x32, 0x37, 0x38, 0x40400341, 0x37, 0x31, 0x35, 0x4040059b, 0x33, 0x35, 0x39, 0x39, 0x37, 0x37, 0x30, 0x30, 0x31, 0x32, 0x39, 0x40400472, 0x38, 0x39, 0x34, 0x34, 0x31, 0x40400277, 0x36, 0x38, 0x35, 0x35, 0x4040005f, 0x34, 0x30, 0x36, 0x33, 0x404008e6, 0x32, 0x30, 0x37, 0x32, 0x32, 0x40400158, 0x40800203, 0x34, 0x38, 0x31, 0x35, 0x38, 0x40400205, 0x404001fe, 0x4040027a, 0x40400298, 0x33, 0x39, 0x34, 0x35, 0x32, 0x32, 0x36, 0x37, 0x40c00496, 0x38, 0x4040058a, 0x32, 0x31, 0x404002ea, 0x32, 0x40400387, 0x35, 0x34, 0x36, 0x36, 0x36, 0x4040051b, 0x32, 0x33, 0x39, 0x38, 0x36, 0x34, 0x35, 0x36, 0x404004c4, 0x31, 0x36, 0x33, 0x35, 0x40800253, 0x40400811, 0x37, 0x404008ad, 0x39, 0x38, 0x4040045e, 0x39, 0x33, 0x36, 0x33, 0x34, 0x4040075b, 0x37, 0x34, 0x33, 0x32, 0x34, 0x4040047b, 0x31, 0x35, 0x30, 0x37, 0x36, 0x404004bb, 0x37, 0x39, 0x34, 0x35, 0x31, 0x30, 0x39, 0x4040003e, 0x30, 0x39, 0x34, 0x30, 0x404006a6, 0x38, 0x38, 0x37, 
0x39, 0x37, 0x31, 0x30, 0x38, 0x39, 0x33, 0x404008f0, 0x36, 0x39, 0x31, 0x33, 0x36, 0x38, 0x36, 0x37, 0x32, 0x4040025b, 0x404001fe, 0x35, 0x4040053f, 0x40400468, 0x40400801, 0x31, 0x37, 0x39, 0x32, 0x38, 0x36, 0x38, 0x404008cc, 0x38, 0x37, 0x34, 0x37, 0x4080079e, 0x38, 0x32, 0x34, 0x4040097a, 0x38, 0x4040025b, 0x37, 0x31, 0x34, 0x39, 0x30, 0x39, 0x36, 0x37, 0x35, 0x39, 0x38, 0x404006ef, 0x33, 0x36, 0x35, 0x40400134, 0x38, 0x31, 0x4040005c, 0x40400745, 0x40400936, 0x36, 0x38, 0x32, 0x39, 0x4040057e, 0x38, 0x37, 0x32, 0x32, 0x36, 0x35, 0x38, 0x38, 0x30, 0x40400611, 0x35, 0x40400249, 0x34, 0x32, 0x37, 0x30, 0x34, 0x37, 0x37, 0x35, 0x35, 0x4040081e, 0x33, 0x37, 0x39, 0x36, 0x34, 0x31, 0x34, 0x35, 0x31, 0x35, 0x32, 0x404005fd, 0x32, 0x33, 0x34, 0x33, 0x36, 0x34, 0x35, 0x34, 0x404005de, 0x34, 0x34, 0x34, 0x37, 0x39, 0x35, 0x4040003c, 0x40400523, 0x408008e6, 0x34, 0x31, 0x4040052a, 0x33, 0x40400304, 0x35, 0x32, 0x33, 0x31, 0x40800841, 0x31, 0x36, 0x36, 0x31, 0x404008b2, 0x35, 0x39, 0x36, 0x39, 0x35, 0x33, 0x36, 0x32, 0x33, 0x31, 0x34, 0x404005ff, 0x32, 0x34, 0x38, 0x34, 0x39, 0x33, 0x37, 0x31, 0x38, 0x37, 0x31, 0x31, 0x30, 0x31, 0x34, 0x35, 0x37, 0x36, 0x35, 0x34, 0x40400761, 0x30, 0x32, 0x37, 0x39, 0x39, 0x33, 0x34, 0x34, 0x30, 0x33, 0x37, 0x34, 0x32, 0x30, 0x30, 0x37, 0x4040093f, 0x37, 0x38, 0x35, 0x33, 0x39, 0x30, 0x36, 0x32, 0x31, 0x39, 0x40800299, 0x40400345, 0x38, 0x34, 0x37, 0x408003d2, 0x38, 0x33, 0x33, 0x32, 0x31, 0x34, 0x34, 0x35, 0x37, 0x31, 0x40400284, 0x40400776, 0x34, 0x33, 0x35, 0x30, 0x40400928, 0x40400468, 0x35, 0x33, 0x31, 0x39, 0x31, 0x30, 0x34, 0x38, 0x34, 0x38, 0x31, 0x30, 0x30, 0x35, 0x33, 0x37, 0x30, 0x36, 0x404008bc, 0x4080059d, 0x40800781, 0x31, 0x40400559, 0x37, 0x4040031b, 0x35, 0x404007ec, 0x4040040c, 0x36, 0x33, 0x408007dc, 0x34, 0x40400971, 0x4080034e, 0x408003f5, 0x38, 0x4080052d, 0x40800887, 0x39, 0x40400187, 0x39, 0x31, 0x404008ce, 0x38, 0x31, 0x34, 0x36, 0x37, 0x35, 0x31, 0x4040062b, 0x31, 0x32, 0x33, 0x39, 0x40c001a9, 0x39, 0x30, 0x37, 0x31, 0x38, 0x36, 0x34, 0x39, 0x34, 0x32, 0x33, 0x31, 0x39, 0x36, 0x31, 0x35, 0x36, 0x404001ec, 0x404006bc, 0x39, 0x35, 0x40400926, 0x40400469, 0x4040011b, 0x36, 0x30, 0x33, 0x38, 0x40400a25, 0x4040016f, 0x40400384, 0x36, 0x32, 0x4040045a, 0x35, 0x4040084c, 0x36, 0x33, 0x38, 0x39, 0x33, 0x37, 0x37, 0x38, 0x37, 0x404008c5, 0x404000f8, 0x39, 0x37, 0x39, 0x32, 0x30, 0x37, 0x37, 0x33, 0x404005d7, 0x32, 0x31, 0x38, 0x32, 0x35, 0x36, 0x404007df, 0x36, 0x36, 0x404006d6, 0x34, 0x32, 0x4080067e, 0x36, 0x404006e6, 0x34, 0x34, 0x40400024, 0x35, 0x34, 0x39, 0x32, 0x30, 0x32, 0x36, 0x30, 0x35, 0x40400ab3, 0x408003e4, 0x32, 0x30, 0x31, 0x34, 0x39, 0x404004d2, 0x38, 0x35, 0x30, 0x37, 0x33, 0x40400599, 0x36, 0x36, 0x36, 0x30, 0x40400194, 0x32, 0x34, 0x33, 0x34, 0x30, 0x40400087, 0x30, 0x4040076b, 0x38, 0x36, 0x33, 0x40400956, 0x404007e4, 0x4040042b, 0x40400174, 0x35, 0x37, 0x39, 0x36, 0x32, 0x36, 0x38, 0x35, 0x36, 0x40400140, 0x35, 0x30, 0x38, 0x40400523, 0x35, 0x38, 0x37, 0x39, 0x36, 0x39, 0x39, 0x40400711, 0x35, 0x37, 0x34, 0x40400a18, 0x38, 0x34, 0x30, 0x404008b3, 0x31, 0x34, 0x35, 0x39, 0x31, 0x4040078c, 0x37, 0x30, 0x40400234, 0x30, 0x31, 0x40400be7, 0x31, 0x32, 0x40400c74, 0x30, 0x404003c3, 0x33, 0x39, 0x40400b2a, 0x40400112, 0x37, 0x31, 0x35, 0x404003b0, 0x34, 0x32, 0x30, 0x40800bf2, 0x39, 0x40400bc2, 0x30, 0x37, 0x40400341, 0x40400795, 0x40400aaf, 0x40400c62, 0x32, 0x31, 0x40400960, 0x32, 0x35, 0x31, 0x4040057b, 0x40400944, 0x39, 0x32, 0x404001b2, 0x38, 0x32, 0x36, 0x40400b66, 0x32, 0x40400278, 0x33, 0x32, 0x31, 0x35, 0x37, 0x39, 0x31, 
0x39, 0x38, 0x34, 0x31, 0x34, 0x4080087b, 0x39, 0x31, 0x36, 0x34, 0x408006e8, 0x39, 0x40800b58, 0x404008db, 0x37, 0x32, 0x32, 0x40400321, 0x35, 0x404008a4, 0x40400141, 0x39, 0x31, 0x30, 0x404000bc, 0x40400c5b, 0x35, 0x32, 0x38, 0x30, 0x31, 0x37, 0x40400231, 0x37, 0x31, 0x32, 0x40400914, 0x38, 0x33, 0x32, 0x40400373, 0x31, 0x40400589, 0x30, 0x39, 0x33, 0x35, 0x33, 0x39, 0x36, 0x35, 0x37, 0x4040064b, 0x31, 0x30, 0x38, 0x33, 0x40400069, 0x35, 0x31, 0x4040077a, 0x40400d5a, 0x31, 0x34, 0x34, 0x34, 0x32, 0x31, 0x30, 0x30, 0x40400202, 0x30, 0x33, 0x4040019c, 0x31, 0x31, 0x30, 0x33, 0x40400c81, 0x40400009, 0x40400026, 0x40c00602, 0x35, 0x31, 0x36, 0x404005d9, 0x40800883, 0x4040092a, 0x35, 0x40800c42, 0x38, 0x35, 0x31, 0x37, 0x31, 0x34, 0x33, 0x37, 0x40400605, 0x4040006d, 0x31, 0x35, 0x35, 0x36, 0x35, 0x30, 0x38, 0x38, 0x404003b9, 0x39, 0x38, 0x39, 0x38, 0x35, 0x39, 0x39, 0x38, 0x32, 0x33, 0x38, 0x404001cf, 0x404009ba, 0x33, 0x4040016c, 0x4040043e, 0x404009c3, 0x38, 0x40800e05, 0x33, 0x32, 0x40400107, 0x35, 0x40400305, 0x33, 0x404001ca, 0x39, 0x4040041b, 0x39, 0x38, 0x4040087d, 0x34, 0x40400cb8, 0x37, 0x4040064b, 0x30, 0x37, 0x404000e5, 0x34, 0x38, 0x31, 0x34, 0x31, 0x40400539, 0x38, 0x35, 0x39, 0x34, 0x36, 0x31, 0x40400bc9, 0x38, 0x30}, + }, + { + input: "testdata/huffman-rand-1k.in", + want: "testdata/huffman-rand-1k.%s.expect", + wantNoInput: "testdata/huffman-rand-1k.%s.expect-noinput", + tokens: []token{0xf8, 0x8b, 0x96, 0x76, 0x48, 0xd, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xd, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa, 0x64, 0xb, 0xe0, 0x23, 0x29, 0xbd, 0xf7, 0xe7, 0x83, 0x3c, 0xfb, 0xdf, 0xb3, 0xae, 0x4f, 0xa4, 0x47, 0x55, 0x99, 0xde, 0x2f, 0x96, 0x6e, 0x1c, 0x43, 0x4c, 0x87, 0xe2, 0x7c, 0xd9, 0x5f, 0x4c, 0x7c, 0xe8, 0x90, 0x3, 0xdb, 0x30, 0x95, 0xd6, 0x22, 0xc, 0x47, 0xb8, 0x4d, 0x6b, 0xbd, 0x24, 0x11, 0xab, 0x2c, 0xd7, 0xbe, 0x6e, 0x7a, 0xd6, 0x8, 0xa3, 0x98, 0xd8, 0xdd, 0x15, 0x6a, 0xfa, 0x93, 0x30, 0x1, 0x25, 0x1d, 0xa2, 0x74, 0x86, 0x4b, 0x6a, 0x95, 0xe8, 0xe1, 0x4e, 0xe, 0x76, 0xb9, 0x49, 0xa9, 0x5f, 0xa0, 0xa6, 0x63, 0x3c, 0x7e, 0x7e, 0x20, 0x13, 0x4f, 0xbb, 0x66, 0x92, 0xb8, 0x2e, 0xa4, 0xfa, 0x48, 0xcb, 0xae, 0xb9, 0x3c, 0xaf, 0xd3, 0x1f, 0xe1, 0xd5, 0x8d, 0x42, 0x6d, 0xf0, 0xfc, 0x8c, 0xc, 0x0, 0xde, 0x40, 0xab, 0x8b, 0x47, 0x97, 0x4e, 0xa8, 0xcf, 0x8e, 0xdb, 0xa6, 0x8b, 0x20, 0x9, 0x84, 0x7a, 0x66, 0xe5, 0x98, 0x29, 0x2, 0x95, 0xe6, 0x38, 0x32, 0x60, 0x3, 0xe3, 0x9a, 0x1e, 0x54, 0xe8, 0x63, 0x80, 0x48, 0x9c, 0xe7, 0x63, 0x33, 0x6e, 0xa0, 0x65, 0x83, 0xfa, 0xc6, 0xba, 0x7a, 0x43, 0x71, 0x5, 0xf5, 0x68, 0x69, 0x85, 0x9c, 0xba, 0x45, 0xcd, 0x6b, 0xb, 0x19, 0xd1, 0xbb, 0x7f, 0x70, 0x85, 0x92, 0xd1, 0xb4, 
0x64, 0x82, 0xb1, 0xe4, 0x62, 0xc5, 0x3c, 0x46, 0x1f, 0x92, 0x31, 0x1c, 0x4e, 0x41, 0x77, 0xf7, 0xe7, 0x87, 0xa2, 0xf, 0x6e, 0xe8, 0x92, 0x3, 0x6b, 0xa, 0xe7, 0xa9, 0x3b, 0x11, 0xda, 0x66, 0x8a, 0x29, 0xda, 0x79, 0xe1, 0x64, 0x8d, 0xe3, 0x54, 0xd4, 0xf5, 0xef, 0x64, 0x87, 0x3b, 0xf4, 0xc2, 0xf4, 0x71, 0x13, 0xa9, 0xe9, 0xe0, 0xa2, 0x6, 0x14, 0xab, 0x5d, 0xa7, 0x96, 0x0, 0xd6, 0xc3, 0xcc, 0x57, 0xed, 0x39, 0x6a, 0x25, 0xcd, 0x76, 0xea, 0xba, 0x3a, 0xf2, 0xa1, 0x95, 0x5d, 0xe5, 0x71, 0xcf, 0x9c, 0x62, 0x9e, 0x6a, 0xfa, 0xd5, 0x31, 0xd1, 0xa8, 0x66, 0x30, 0x33, 0xaa, 0x51, 0x17, 0x13, 0x82, 0x99, 0xc8, 0x14, 0x60, 0x9f, 0x4d, 0x32, 0x6d, 0xda, 0x19, 0x26, 0x21, 0xdc, 0x7e, 0x2e, 0x25, 0x67, 0x72, 0xca, 0xf, 0x92, 0xcd, 0xf6, 0xd6, 0xcb, 0x97, 0x8a, 0x33, 0x58, 0x73, 0x70, 0x91, 0x1d, 0xbf, 0x28, 0x23, 0xa3, 0xc, 0xf1, 0x83, 0xc3, 0xc8, 0x56, 0x77, 0x68, 0xe3, 0x82, 0xba, 0xb9, 0x57, 0x56, 0x57, 0x9c, 0xc3, 0xd6, 0x14, 0x5, 0x3c, 0xb1, 0xaf, 0x93, 0xc8, 0x8a, 0x57, 0x7f, 0x53, 0xfa, 0x2f, 0xaa, 0x6e, 0x66, 0x83, 0xfa, 0x33, 0xd1, 0x21, 0xab, 0x1b, 0x71, 0xb4, 0x7c, 0xda, 0xfd, 0xfb, 0x7f, 0x20, 0xab, 0x5e, 0xd5, 0xca, 0xfd, 0xdd, 0xe0, 0xee, 0xda, 0xba, 0xa8, 0x27, 0x99, 0x97, 0x69, 0xc1, 0x3c, 0x82, 0x8c, 0xa, 0x5c, 0x2d, 0x5b, 0x88, 0x3e, 0x34, 0x35, 0x86, 0x37, 0x46, 0x79, 0xe1, 0xaa, 0x19, 0xfb, 0xaa, 0xde, 0x15, 0x9, 0xd, 0x1a, 0x57, 0xff, 0xb5, 0xf, 0xf3, 0x2b, 0x5a, 0x6a, 0x4d, 0x19, 0x77, 0x71, 0x45, 0xdf, 0x4f, 0xb3, 0xec, 0xf1, 0xeb, 0x18, 0x53, 0x3e, 0x3b, 0x47, 0x8, 0x9a, 0x73, 0xa0, 0x5c, 0x8c, 0x5f, 0xeb, 0xf, 0x3a, 0xc2, 0x43, 0x67, 0xb4, 0x66, 0x67, 0x80, 0x58, 0xe, 0xc1, 0xec, 0x40, 0xd4, 0x22, 0x94, 0xca, 0xf9, 0xe8, 0x92, 0xe4, 0x69, 0x38, 0xbe, 0x67, 0x64, 0xca, 0x50, 0xc7, 0x6, 0x67, 0x42, 0x6e, 0xa3, 0xf0, 0xb7, 0x6c, 0xf2, 0xe8, 0x5f, 0xb1, 0xaf, 0xe7, 0xdb, 0xbb, 0x77, 0xb5, 0xf8, 0xcb, 0x8, 0xc4, 0x75, 0x7e, 0xc0, 0xf9, 0x1c, 0x7f, 0x3c, 0x89, 0x2f, 0xd2, 0x58, 0x3a, 0xe2, 0xf8, 0x91, 0xb6, 0x7b, 0x24, 0x27, 0xe9, 0xae, 0x84, 0x8b, 0xde, 0x74, 0xac, 0xfd, 0xd9, 0xb7, 0x69, 0x2a, 0xec, 0x32, 0x6f, 0xf0, 0x92, 0x84, 0xf1, 0x40, 0xc, 0x8a, 0xbc, 0x39, 0x6e, 0x2e, 0x73, 0xd4, 0x6e, 0x8a, 0x74, 0x2a, 0xdc, 0x60, 0x1f, 0xa3, 0x7, 0xde, 0x75, 0x8b, 0x74, 0xc8, 0xfe, 0x63, 0x75, 0xf6, 0x3d, 0x63, 0xac, 0x33, 0x89, 0xc3, 0xf0, 0xf8, 0x2d, 0x6b, 0xb4, 0x9e, 0x74, 0x8b, 0x5c, 0x33, 0xb4, 0xca, 0xa8, 0xe4, 0x99, 0xb6, 0x90, 0xa1, 0xef, 0xf, 0xd3, 0x61, 0xb2, 0xc6, 0x1a, 0x94, 0x7c, 0x44, 0x55, 0xf4, 0x45, 0xff, 0x9e, 0xa5, 0x5a, 0xc6, 0xa0, 0xe8, 0x2a, 0xc1, 0x8d, 0x6f, 0x34, 0x11, 0xb9, 0xbe, 0x4e, 0xd9, 0x87, 0x97, 0x73, 0xcf, 0x3d, 0x23, 0xae, 0xd5, 0x1a, 0x5e, 0xae, 0x5d, 0x6a, 0x3, 0xf9, 0x22, 0xd, 0x10, 0xd9, 0x47, 0x69, 0x15, 0x3f, 0xee, 0x52, 0xa3, 0x8, 0xd2, 0x3c, 0x51, 0xf4, 0xf8, 0x9d, 0xe4, 0x98, 0x89, 0xc8, 0x67, 0x39, 0xd5, 0x5e, 0x35, 0x78, 0x27, 0xe8, 0x3c, 0x80, 0xae, 0x79, 0x71, 0xd2, 0x93, 0xf4, 0xaa, 0x51, 0x12, 0x1c, 0x4b, 0x1b, 0xe5, 0x6e, 0x15, 0x6f, 0xe4, 0xbb, 0x51, 0x9b, 0x45, 0x9f, 0xf9, 0xc4, 0x8c, 0x2a, 0xfb, 0x1a, 0xdf, 0x55, 0xd3, 0x48, 0x93, 0x27, 0x1, 0x26, 0xc2, 0x6b, 0x55, 0x6d, 0xa2, 0xfb, 0x84, 0x8b, 0xc9, 0x9e, 0x28, 0xc2, 0xef, 0x1a, 0x24, 0xec, 0x9b, 0xae, 0xbd, 0x60, 0xe9, 0x15, 0x35, 0xee, 0x42, 0xa4, 0x33, 0x5b, 0xfa, 0xf, 0xb6, 0xf7, 0x1, 0xa6, 0x2, 0x4c, 0xca, 0x90, 0x58, 0x3a, 0x96, 0x41, 0xe7, 0xcb, 0x9, 0x8c, 0xdb, 0x85, 0x4d, 0xa8, 0x89, 0xf3, 0xb5, 0x8e, 0xfd, 0x75, 0x5b, 0x4f, 0xed, 0xde, 0x3f, 0xeb, 0x38, 0xa3, 0xbe, 0xb0, 0x73, 0xfc, 0xb8, 0x54, 0xf7, 0x4c, 0x30, 0x67, 0x2e, 0x38, 0xa2, 0x54, 0x18, 0xba, 0x8, 0xbf, 0xf2, 0x39, 
0xd5, 0xfe, 0xa5, 0x41, 0xc6, 0x66, 0x66, 0xba, 0x81, 0xef, 0x67, 0xe4, 0xe6, 0x3c, 0xc, 0xca, 0xa4, 0xa, 0x79, 0xb3, 0x57, 0x8b, 0x8a, 0x75, 0x98, 0x18, 0x42, 0x2f, 0x29, 0xa3, 0x82, 0xef, 0x9f, 0x86, 0x6, 0x23, 0xe1, 0x75, 0xfa, 0x8, 0xb1, 0xde, 0x17, 0x4a}, + }, + { + input: "testdata/huffman-rand-limit.in", + want: "testdata/huffman-rand-limit.%s.expect", + wantNoInput: "testdata/huffman-rand-limit.%s.expect-noinput", + tokens: []token{0x61, 0x51c00000, 0xa, 0xf8, 0x8b, 0x96, 0x76, 0x48, 0xa, 0x85, 0x94, 0x25, 0x80, 0xaf, 0xc2, 0xfe, 0x8d, 0xe8, 0x20, 0xeb, 0x17, 0x86, 0xc9, 0xb7, 0xc5, 0xde, 0x6, 0xea, 0x7d, 0x18, 0x8b, 0xe7, 0x3e, 0x7, 0xda, 0xdf, 0xff, 0x6c, 0x73, 0xde, 0xcc, 0xe7, 0x6d, 0x8d, 0x4, 0x19, 0x49, 0x7f, 0x47, 0x1f, 0x48, 0x15, 0xb0, 0xe8, 0x9e, 0xf2, 0x31, 0x59, 0xde, 0x34, 0xb4, 0x5b, 0xe5, 0xe0, 0x9, 0x11, 0x30, 0xc2, 0x88, 0x5b, 0x7c, 0x5d, 0x14, 0x13, 0x6f, 0x23, 0xa9, 0xa, 0xbc, 0x2d, 0x23, 0xbe, 0xd9, 0xed, 0x75, 0x4, 0x6c, 0x99, 0xdf, 0xfd, 0x70, 0x66, 0xe6, 0xee, 0xd9, 0xb1, 0x9e, 0x6e, 0x83, 0x59, 0xd5, 0xd4, 0x80, 0x59, 0x98, 0x77, 0x89, 0x43, 0x38, 0xc9, 0xaf, 0x30, 0x32, 0x9a, 0x20, 0x1b, 0x46, 0x3d, 0x67, 0x6e, 0xd7, 0x72, 0x9e, 0x4e, 0x21, 0x4f, 0xc6, 0xe0, 0xd4, 0x7b, 0x4, 0x8d, 0xa5, 0x3, 0xf6, 0x5, 0x9b, 0x6b, 0xdc, 0x2a, 0x93, 0x77, 0x28, 0xfd, 0xb4, 0x62, 0xda, 0x20, 0xe7, 0x1f, 0xab, 0x6b, 0x51, 0x43, 0x39, 0x2f, 0xa0, 0x92, 0x1, 0x6c, 0x75, 0x3e, 0xf4, 0x35, 0xfd, 0x43, 0x2e, 0xf7, 0xa4, 0x75, 0xda, 0xea, 0x9b, 0xa}, + }, + { + input: "testdata/huffman-shifts.in", + want: "testdata/huffman-shifts.%s.expect", + wantNoInput: "testdata/huffman-shifts.%s.expect-noinput", + tokens: []token{0x31, 0x30, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x52400001, 0xd, 0xa, 0x32, 0x33, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7fc00001, 0x7f400001}, + }, + { + input: "testdata/huffman-text-shift.in", + want: "testdata/huffman-text-shift.%s.expect", + wantNoInput: "testdata/huffman-text-shift.%s.expect-noinput", + tokens: []token{0x2f, 0x2f, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x32, 0x30, 0x30, 0x39, 0x54, 0x68, 0x47, 0x6f, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x2e, 0x41, 0x6c, 0x6c, 0x40800016, 0x72, 0x72, 0x76, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x55, 0x6f, 0x66, 0x74, 0x68, 0x69, 0x6f, 0x75, 0x72, 0x63, 0x63, 0x6f, 0x64, 0x69, 0x67, 0x6f, 0x76, 0x72, 0x6e, 0x64, 0x62, 0x79, 0x42, 0x53, 0x44, 0x2d, 0x74, 0x79, 0x6c, 0x40400020, 0x6c, 0x69, 0x63, 0x6e, 0x74, 0x68, 0x74, 0x63, 0x6e, 0x62, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x69, 0x6e, 0x74, 0x68, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x66, 0x69, 0x6c, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x63, 0x6b, 0x67, 0x6d, 0x69, 0x6e, 0x4040000a, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x6f, 0x22, 0x4040000c, 0x66, 0x75, 0x6e, 0x63, 0x6d, 0x69, 0x6e, 0x28, 0x29, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x72, 0x62, 0x3d, 0x6d, 0x6b, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x2c, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x5f, 0x3a, 0x3d, 0x6f, 0x2e, 0x43, 0x72, 0x74, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x40800021, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x58, 0x78, 0x79, 0x7a, 
0x21, 0x22, 0x23, 0xc2, 0xa4, 0x25, 0x26, 0x2f, 0x3f, 0x22}, + }, + { + input: "testdata/huffman-text.in", + want: "testdata/huffman-text.%s.expect", + wantNoInput: "testdata/huffman-text.%s.expect-noinput", + tokens: []token{0x2f, 0x2f, 0x20, 0x43, 0x6f, 0x70, 0x79, 0x72, 0x69, 0x67, 0x68, 0x74, 0x20, 0x32, 0x30, 0x30, 0x39, 0x20, 0x54, 0x68, 0x65, 0x20, 0x47, 0x6f, 0x20, 0x41, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x73, 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x4080001e, 0x73, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x2e, 0xd, 0xa, 0x2f, 0x2f, 0x20, 0x55, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x73, 0x20, 0x67, 0x6f, 0x76, 0x65, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, 0x20, 0x42, 0x53, 0x44, 0x2d, 0x73, 0x74, 0x79, 0x6c, 0x65, 0x40800036, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, 0x49, 0x43, 0x45, 0x4e, 0x53, 0x45, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0xd, 0xa, 0xd, 0xa, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x20, 0x6d, 0x61, 0x69, 0x6e, 0x4040000f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x22, 0x6f, 0x73, 0x22, 0x4040000e, 0x66, 0x75, 0x6e, 0x63, 0x4080001b, 0x28, 0x29, 0x20, 0x7b, 0xd, 0xa, 0x9, 0x76, 0x61, 0x72, 0x20, 0x62, 0x20, 0x3d, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x28, 0x5b, 0x5d, 0x62, 0x79, 0x74, 0x65, 0x2c, 0x20, 0x36, 0x35, 0x35, 0x33, 0x35, 0x29, 0xd, 0xa, 0x9, 0x66, 0x2c, 0x20, 0x5f, 0x20, 0x3a, 0x3d, 0x20, 0x6f, 0x73, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x28, 0x22, 0x68, 0x75, 0x66, 0x66, 0x6d, 0x61, 0x6e, 0x2d, 0x6e, 0x75, 0x6c, 0x6c, 0x2d, 0x6d, 0x61, 0x78, 0x2e, 0x69, 0x6e, 0x22, 0x4080002a, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x28, 0x62, 0x29, 0xd, 0xa, 0x7d, 0xd, 0xa}, + }, + { + input: "testdata/huffman-zero.in", + want: "testdata/huffman-zero.%s.expect", + wantNoInput: "testdata/huffman-zero.%s.expect-noinput", + tokens: []token{0x30, ml, 0x4b800000}, + }, + { + input: "", + want: "", + wantNoInput: "testdata/null-long-match.%s.expect-noinput", + tokens: []token{0x0, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 
ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, ml, 0x41400000}, + }, +} + +// TestWriteBlock tests if the writeBlock encoding has changed. +// To update the reference files use the "-update" flag on the test. +func TestWriteBlock(t *testing.T) { + for _, test := range writeBlockTests { + testBlock(t, test, "wb") + } +} + +// TestWriteBlockDynamic tests if the writeBlockDynamic encoding has changed. +// To update the reference files use the "-update" flag on the test. +func TestWriteBlockDynamic(t *testing.T) { + for _, test := range writeBlockTests { + testBlock(t, test, "dyn") + } +} + +// testBlock tests a block against its references, +// or regenerate the references, if "-update" flag is set. 
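+//
+// Usage sketch (assuming the package registers an "-update" test flag; the
+// code below dereferences *update): the reference files can presumably be
+// regenerated from the package directory with something like
+//
+//	go test -run "TestWriteBlock|TestWriteBlockDynamic" -update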
+func testBlock(t *testing.T, test huffTest, ttype string) {
+	if test.want != "" {
+		test.want = fmt.Sprintf(test.want, ttype)
+	}
+	test.wantNoInput = fmt.Sprintf(test.wantNoInput, ttype)
+	if *update {
+		if test.input != "" {
+			t.Logf("Updating %q", test.want)
+			input, err := ioutil.ReadFile(test.input)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+
+			f, err := os.Create(test.want)
+			if err != nil {
+				t.Error(err)
+				return
+			}
+			defer f.Close()
+			bw := newHuffmanBitWriter(f)
+			writeToType(t, ttype, bw, test.tokens, input)
+		}
+
+		t.Logf("Updating %q", test.wantNoInput)
+		f, err := os.Create(test.wantNoInput)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		defer f.Close()
+		bw := newHuffmanBitWriter(f)
+		writeToType(t, ttype, bw, test.tokens, nil)
+		return
+	}
+
+	if test.input != "" {
+		t.Logf("Testing %q", test.want)
+		input, err := ioutil.ReadFile(test.input)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		want, err := ioutil.ReadFile(test.want)
+		if err != nil {
+			t.Error(err)
+			return
+		}
+		var buf bytes.Buffer
+		bw := newHuffmanBitWriter(&buf)
+		writeToType(t, ttype, bw, test.tokens, input)
+
+		got := buf.Bytes()
+		if !bytes.Equal(got, want) {
+			t.Errorf("writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".got")
+			if err := ioutil.WriteFile(test.want+".got", got, 0666); err != nil {
+				t.Error(err)
+			}
+		}
+		t.Log("Output ok")
+
+		// Test if the writer produces the same output after reset.
+		buf.Reset()
+		bw.reset(&buf)
+		writeToType(t, ttype, bw, test.tokens, input)
+		bw.flush()
+		got = buf.Bytes()
+		if !bytes.Equal(got, want) {
+			t.Errorf("reset: writeBlock did not yield expected result for file %q with input. See %q", test.want, test.want+".reset.got")
+			if err := ioutil.WriteFile(test.want+".reset.got", got, 0666); err != nil {
+				t.Error(err)
+			}
+			return
+		}
+		t.Log("Reset ok")
+		testWriterEOF(t, "wb", test, true)
+	}
+	t.Logf("Testing %q", test.wantNoInput)
+	wantNI, err := ioutil.ReadFile(test.wantNoInput)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+	var buf bytes.Buffer
+	bw := newHuffmanBitWriter(&buf)
+	writeToType(t, ttype, bw, test.tokens, nil)
+
+	got := buf.Bytes()
+	if !bytes.Equal(got, wantNI) {
+		t.Errorf("writeBlock did not yield expected result for file %q without input. See %q", test.wantNoInput, test.wantNoInput+".got")
+		if err := ioutil.WriteFile(test.wantNoInput+".got", got, 0666); err != nil {
+			t.Error(err)
+		}
+	} else if got[0]&1 == 1 {
+		t.Error("got unexpected EOF")
+		return
+	}
+
+	t.Log("Output ok")
+
+	// Test if the writer produces the same output after reset.
+	buf.Reset()
+	bw.reset(&buf)
+	writeToType(t, ttype, bw, test.tokens, nil)
+	bw.flush()
+	got = buf.Bytes()
+	if !bytes.Equal(got, wantNI) {
+		t.Errorf("reset: writeBlock did not yield expected result for file %q without input. See %q", test.wantNoInput, test.wantNoInput+".reset.got")
+		if err := ioutil.WriteFile(test.wantNoInput+".reset.got", got, 0666); err != nil {
+			t.Error(err)
+		}
+		return
+	}
+	t.Log("Reset ok")
+	testWriterEOF(t, "wb", test, false)
+}
+
+func writeToType(t *testing.T, ttype string, bw *huffmanBitWriter, tok []token, input []byte) {
+	switch ttype {
+	case "wb":
+		bw.writeBlock(tok, false, input)
+	case "dyn":
+		bw.writeBlockDynamic(tok, false, input)
+	default:
+		panic("unknown test type")
+	}
+
+	if bw.err != nil {
+		t.Error(bw.err)
+		return
+	}
+
+	bw.flush()
+	if bw.err != nil {
+		t.Error(bw.err)
+		return
+	}
+}
+
+// testWriterEOF tests if the written block contains an EOF marker.
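+//
+// Background note: in DEFLATE (RFC 1951) bits are packed starting at the
+// least-significant bit of each byte, and the first bit of a block header is
+// the BFINAL flag, so checking b[0]&1 of the written stream tells whether
+// the block was marked final.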
+func testWriterEOF(t *testing.T, ttype string, test huffTest, useInput bool) { + if useInput && test.input == "" { + return + } + var input []byte + if useInput { + var err error + input, err = ioutil.ReadFile(test.input) + if err != nil { + t.Error(err) + return + } + } + var buf bytes.Buffer + bw := newHuffmanBitWriter(&buf) + switch ttype { + case "wb": + bw.writeBlock(test.tokens, true, input) + case "dyn": + bw.writeBlockDynamic(test.tokens, true, input) + case "huff": + bw.writeBlockHuff(true, input) + default: + panic("unknown test type") + } + if bw.err != nil { + t.Error(bw.err) + return + } + + bw.flush() + if bw.err != nil { + t.Error(bw.err) + return + } + b := buf.Bytes() + if len(b) == 0 { + t.Error("no output received") + return + } + if b[0]&1 != 1 { + t.Errorf("block not marked with EOF for input %q", test.input) + return + } + t.Log("EOF ok") +} diff --git a/vendor/github.com/klauspost/compress/flate/inflate_test.go b/vendor/github.com/klauspost/compress/flate/inflate_test.go new file mode 100644 index 0000000000..8402c0c529 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate_test.go @@ -0,0 +1,282 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "bytes" + "crypto/rand" + "io" + "io/ioutil" + "strconv" + "strings" + "testing" +) + +func TestReset(t *testing.T) { + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, 2) + for i, s := range ss { + w, _ := NewWriter(&deflated[i], 1) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, 2) + + f := NewReader(&deflated[0]) + io.Copy(&inflated[0], f) + f.(Resetter).Reset(&deflated[1], nil) + io.Copy(&inflated[1], f) + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} + +func TestReaderTruncated(t *testing.T) { + vectors := []struct{ input, output string }{ + {"\x00", ""}, + {"\x00\f", ""}, + {"\x00\f\x00", ""}, + {"\x00\f\x00\xf3\xff", ""}, + {"\x00\f\x00\xf3\xffhello", "hello"}, + {"\x00\f\x00\xf3\xffhello, world", "hello, world"}, + {"\x02", ""}, + {"\xf2H\xcd", "He"}, + {"\xf2H͙0a\u0084\t", "Hel\x90\x90\x90\x90\x90"}, + {"\xf2H͙0a\u0084\t\x00", "Hel\x90\x90\x90\x90\x90"}, + } + + for i, v := range vectors { + r := strings.NewReader(v.input) + zr := NewReader(r) + b, err := ioutil.ReadAll(zr) + if err != io.ErrUnexpectedEOF { + t.Errorf("test %d, error mismatch: got %v, want io.ErrUnexpectedEOF", i, err) + } + if string(b) != v.output { + t.Errorf("test %d, output mismatch: got %q, want %q", i, b, v.output) + } + } +} + +func TestResetDict(t *testing.T) { + dict := []byte("the lorem fox") + ss := []string{ + "lorem ipsum izzle fo rizzle", + "the quick brown fox jumped over", + } + + deflated := make([]bytes.Buffer, len(ss)) + for i, s := range ss { + w, _ := NewWriterDict(&deflated[i], DefaultCompression, dict) + w.Write([]byte(s)) + w.Close() + } + + inflated := make([]bytes.Buffer, len(ss)) + + f := NewReader(nil) + for i := range inflated { + f.(Resetter).Reset(&deflated[i], dict) + io.Copy(&inflated[i], f) + } + f.Close() + + for i, s := range ss { + if s != inflated[i].String() { + t.Errorf("inflated[%d]:\ngot %q\nwant %q", i, inflated[i], s) + } + } +} + +// Tests ported from zlib/test/infcover.c +type infTest struct { + hex string + id string + n int +} + +var infTests 
= []infTest{
+	{"0 0 0 0 0", "invalid stored block lengths", 1},
+	{"3 0", "fixed", 0},
+	{"6", "invalid block type", 1},
+	{"1 1 0 fe ff 0", "stored", 0},
+	{"fc 0 0", "too many length or distance symbols", 1},
+	{"4 0 fe ff", "invalid code lengths set", 1},
+	{"4 0 24 49 0", "invalid bit length repeat", 1},
+	{"4 0 24 e9 ff ff", "invalid bit length repeat", 1},
+	{"4 0 24 e9 ff 6d", "invalid code -- missing end-of-block", 1},
+	{"4 80 49 92 24 49 92 24 71 ff ff 93 11 0", "invalid literal/lengths set", 1},
+	{"4 80 49 92 24 49 92 24 f b4 ff ff c3 84", "invalid distances set", 1},
+	{"4 c0 81 8 0 0 0 0 20 7f eb b 0 0", "invalid literal/length code", 1},
+	{"2 7e ff ff", "invalid distance code", 1},
+	{"c c0 81 0 0 0 0 0 90 ff 6b 4 0", "invalid distance too far back", 1},
+
+	// also trailer mismatch just in inflate()
+	{"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 1", "incorrect data check", -1},
+	{"1f 8b 8 0 0 0 0 0 0 0 3 0 0 0 0 0 0 0 0 1", "incorrect length check", -1},
+	{"5 c0 21 d 0 0 0 80 b0 fe 6d 2f 91 6c", "pull 17", 0},
+	{"5 e0 81 91 24 cb b2 2c 49 e2 f 2e 8b 9a 47 56 9f fb fe ec d2 ff 1f", "long code", 0},
+	{"ed c0 1 1 0 0 0 40 20 ff 57 1b 42 2c 4f", "length extra", 0},
+	{"ed cf c1 b1 2c 47 10 c4 30 fa 6f 35 1d 1 82 59 3d fb be 2e 2a fc f c", "long distance and extra", 0},
+	{"ed c0 81 0 0 0 0 80 a0 fd a9 17 a9 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 6", "window end", 0},
+}
+
+func TestInflate(t *testing.T) {
+	for _, test := range infTests {
+		hex := strings.Split(test.hex, " ")
+		data := make([]byte, len(hex))
+		for i, h := range hex {
+			b, _ := strconv.ParseInt(h, 16, 32)
+			data[i] = byte(b)
+		}
+		buf := bytes.NewReader(data)
+		r := NewReader(buf)
+
+		_, err := io.Copy(ioutil.Discard, r)
+		if (test.n == 0 && err == nil) || (test.n != 0 && err != nil) {
+			t.Logf("%q: OK:", test.id)
+			t.Logf(" - got %v", err)
+			continue
+		}
+
+		if test.n == 0 && err != nil {
+			t.Errorf("%q: Expected no error, but got %v", test.id, err)
+			continue
+		}
+
+		if test.n != 0 && err == nil {
+			t.Errorf("%q: Expected an error, but got none", test.id)
+			continue
+		}
+		t.Fatal(test.n, err)
+	}
+
+	for _, test := range infOutTests {
+		hex := strings.Split(test.hex, " ")
+		data := make([]byte, len(hex))
+		for i, h := range hex {
+			b, _ := strconv.ParseInt(h, 16, 32)
+			data[i] = byte(b)
+		}
+		buf := bytes.NewReader(data)
+		r := NewReader(buf)
+
+		_, err := io.Copy(ioutil.Discard, r)
+		if test.err == (err != nil) {
+			t.Logf("%q: OK:", test.id)
+			t.Logf(" - got %v", err)
+			continue
+		}
+
+		if test.err == false && err != nil {
+			t.Errorf("%q: Expected no error, but got %v", test.id, err)
+			continue
+		}
+
+		if test.err && err == nil {
+			t.Errorf("%q: Expected an error, but got none", test.id)
+			continue
+		}
+		t.Fatal(test.err, err)
+	}
+}
+
+// Tests ported from zlib/test/infcover.c.
+// Since zlib inflate is push (writer) instead of pull (reader),
+// some of the window size tests have been removed, since they
+// are irrelevant.
+type infOutTest struct { + hex string + id string + step int + win int + length int + err bool +} + +var infOutTests = []infOutTest{ + {"2 8 20 80 0 3 0", "inflate_fast TYPE return", 0, -15, 258, false}, + {"63 18 5 40 c 0", "window wrap", 3, -8, 300, false}, + {"e5 e0 81 ad 6d cb b2 2c c9 01 1e 59 63 ae 7d ee fb 4d fd b5 35 41 68 ff 7f 0f 0 0 0", "fast length extra bits", 0, -8, 258, true}, + {"25 fd 81 b5 6d 59 b6 6a 49 ea af 35 6 34 eb 8c b9 f6 b9 1e ef 67 49 50 fe ff ff 3f 0 0", "fast distance extra bits", 0, -8, 258, true}, + {"3 7e 0 0 0 0 0", "fast invalid distance code", 0, -8, 258, true}, + {"1b 7 0 0 0 0 0", "fast invalid literal/length code", 0, -8, 258, true}, + {"d c7 1 ae eb 38 c 4 41 a0 87 72 de df fb 1f b8 36 b1 38 5d ff ff 0", "fast 2nd level codes and too far back", 0, -8, 258, true}, + {"63 18 5 8c 10 8 0 0 0 0", "very common case", 0, -8, 259, false}, + {"63 60 60 18 c9 0 8 18 18 18 26 c0 28 0 29 0 0 0", "contiguous and wrap around window", 6, -8, 259, false}, + {"63 0 3 0 0 0 0 0", "copy direct from output", 0, -8, 259, false}, + {"1f 8b 0 0", "bad gzip method", 0, 31, 0, true}, + {"1f 8b 8 80", "bad gzip flags", 0, 31, 0, true}, + {"77 85", "bad zlib method", 0, 15, 0, true}, + {"78 9c", "bad zlib window size", 0, 8, 0, true}, + {"1f 8b 8 1e 0 0 0 0 0 0 1 0 0 0 0 0 0", "bad header crc", 0, 47, 1, true}, + {"1f 8b 8 2 0 0 0 0 0 0 1d 26 3 0 0 0 0 0 0 0 0 0", "check gzip length", 0, 47, 0, true}, + {"78 90", "bad zlib header check", 0, 47, 0, true}, + {"8 b8 0 0 0 1", "need dictionary", 0, 8, 0, true}, + {"63 18 68 30 d0 0 0", "force split window update", 4, -8, 259, false}, + {"3 0", "use fixed blocks", 0, -15, 1, false}, + {"", "bad window size", 0, 1, 0, true}, +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + compressed := &bytes.Buffer{} + w, err := NewWriter(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + buf := compressed.Bytes() + + dec := NewReader(bytes.NewBuffer(buf)) + // ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure. + readall, err := ioutil.ReadAll(ioutil.NopCloser(dec)) + if err != nil { + t.Fatal(err) + } + if len(readall) != len(input) { + t.Fatal("did not decompress everything") + } + + dec = NewReader(bytes.NewBuffer(buf)) + wtbuf := &bytes.Buffer{} + written, err := dec.(io.WriterTo).WriteTo(wtbuf) + if err != nil { + t.Fatal(err) + } + if written != int64(len(input)) { + t.Error("Returned length did not match, expected", len(input), "got", written) + } + if wtbuf.Len() != len(input) { + t.Error("Actual Length did not match, expected", len(input), "got", wtbuf.Len()) + } + if bytes.Compare(wtbuf.Bytes(), input) != 0 { + t.Fatal("output did not match input") + } +} diff --git a/vendor/github.com/klauspost/compress/flate/reader_test.go b/vendor/github.com/klauspost/compress/flate/reader_test.go new file mode 100644 index 0000000000..e42bd010f6 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reader_test.go @@ -0,0 +1,97 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package flate + +import ( + "bytes" + "io" + "io/ioutil" + "runtime" + "strings" + "testing" +) + +func TestNlitOutOfRange(t *testing.T) { + // Trying to decode this bogus flate data, which has a Huffman table + // with nlit=288, should not panic. + io.Copy(ioutil.Discard, NewReader(strings.NewReader( + "\xfc\xfe\x36\xe7\x5e\x1c\xef\xb3\x55\x58\x77\xb6\x56\xb5\x43\xf4"+ + "\x6f\xf2\xd2\xe6\x3d\x99\xa0\x85\x8c\x48\xeb\xf8\xda\x83\x04\x2a"+ + "\x75\xc4\xf8\x0f\x12\x11\xb9\xb4\x4b\x09\xa0\xbe\x8b\x91\x4c"))) +} + +const ( + digits = iota + twain +) + +var testfiles = []string{ + // Digits is the digits of the irrational number e. Its decimal representation + // does not repeat, but there are only 10 possible digits, so it should be + // reasonably compressible. + digits: "../testdata/e.txt", + // Twain is Project Gutenberg's edition of Mark Twain's classic English novel. + twain: "../testdata/Mark.Twain-Tom.Sawyer.txt", +} + +func benchmarkDecode(b *testing.B, testfile, level, n int) { + b.ReportAllocs() + b.StopTimer() + b.SetBytes(int64(n)) + buf0, err := ioutil.ReadFile(testfiles[testfile]) + if err != nil { + b.Fatal(err) + } + if len(buf0) == 0 { + b.Fatalf("test file %q has no data", testfiles[testfile]) + } + compressed := new(bytes.Buffer) + w, err := NewWriter(compressed, level) + if err != nil { + b.Fatal(err) + } + for i := 0; i < n; i += len(buf0) { + if len(buf0) > n-i { + buf0 = buf0[:n-i] + } + io.Copy(w, bytes.NewReader(buf0)) + } + w.Close() + buf1 := compressed.Bytes() + buf0, compressed, w = nil, nil, nil + runtime.GC() + b.StartTimer() + for i := 0; i < b.N; i++ { + io.Copy(ioutil.Discard, NewReader(bytes.NewReader(buf1))) + } +} + +// These short names are so that gofmt doesn't break the BenchmarkXxx function +// bodies below over multiple lines. 
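+//
+// Of these levels, ConstantCompression (level -2, Huffman-only entropy
+// coding with no match search) appears to be specific to this package; the
+// other three mirror the standard library's compress/flate levels.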
+const ( + constant = ConstantCompression + speed = BestSpeed + default_ = DefaultCompression + compress = BestCompression +) + +func BenchmarkDecodeDigitsSpeed1e4(b *testing.B) { benchmarkDecode(b, digits, speed, 1e4) } +func BenchmarkDecodeDigitsSpeed1e5(b *testing.B) { benchmarkDecode(b, digits, speed, 1e5) } +func BenchmarkDecodeDigitsSpeed1e6(b *testing.B) { benchmarkDecode(b, digits, speed, 1e6) } +func BenchmarkDecodeDigitsDefault1e4(b *testing.B) { benchmarkDecode(b, digits, default_, 1e4) } +func BenchmarkDecodeDigitsDefault1e5(b *testing.B) { benchmarkDecode(b, digits, default_, 1e5) } +func BenchmarkDecodeDigitsDefault1e6(b *testing.B) { benchmarkDecode(b, digits, default_, 1e6) } +func BenchmarkDecodeDigitsCompress1e4(b *testing.B) { benchmarkDecode(b, digits, compress, 1e4) } +func BenchmarkDecodeDigitsCompress1e5(b *testing.B) { benchmarkDecode(b, digits, compress, 1e5) } +func BenchmarkDecodeDigitsCompress1e6(b *testing.B) { benchmarkDecode(b, digits, compress, 1e6) } +func BenchmarkDecodeTwainSpeed1e4(b *testing.B) { benchmarkDecode(b, twain, speed, 1e4) } +func BenchmarkDecodeTwainSpeed1e5(b *testing.B) { benchmarkDecode(b, twain, speed, 1e5) } +func BenchmarkDecodeTwainSpeed1e6(b *testing.B) { benchmarkDecode(b, twain, speed, 1e6) } +func BenchmarkDecodeTwainDefault1e4(b *testing.B) { benchmarkDecode(b, twain, default_, 1e4) } +func BenchmarkDecodeTwainDefault1e5(b *testing.B) { benchmarkDecode(b, twain, default_, 1e5) } +func BenchmarkDecodeTwainDefault1e6(b *testing.B) { benchmarkDecode(b, twain, default_, 1e6) } +func BenchmarkDecodeTwainCompress1e4(b *testing.B) { benchmarkDecode(b, twain, compress, 1e4) } +func BenchmarkDecodeTwainCompress1e5(b *testing.B) { benchmarkDecode(b, twain, compress, 1e5) } +func BenchmarkDecodeTwainCompress1e6(b *testing.B) { benchmarkDecode(b, twain, compress, 1e6) } diff --git a/vendor/github.com/klauspost/compress/flate/writer_test.go b/vendor/github.com/klauspost/compress/flate/writer_test.go new file mode 100644 index 0000000000..024512afb2 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/writer_test.go @@ -0,0 +1,258 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package flate
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"runtime"
+	"testing"
+)
+
+func benchmarkEncoder(b *testing.B, testfile, level, n int) {
+	b.StopTimer()
+	b.SetBytes(int64(n))
+	buf0, err := ioutil.ReadFile(testfiles[testfile])
+	if err != nil {
+		b.Fatal(err)
+	}
+	if len(buf0) == 0 {
+		b.Fatalf("test file %q has no data", testfiles[testfile])
+	}
+	buf1 := make([]byte, n)
+	for i := 0; i < n; i += len(buf0) {
+		if len(buf0) > n-i {
+			buf0 = buf0[:n-i]
+		}
+		copy(buf1[i:], buf0)
+	}
+	buf0 = nil
+	runtime.GC()
+	w, err := NewWriter(ioutil.Discard, level)
+	b.StartTimer()
+	for i := 0; i < b.N; i++ {
+		w.Reset(ioutil.Discard)
+		_, err = w.Write(buf1)
+		if err != nil {
+			b.Fatal(err)
+		}
+		err = w.Close()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkEncodeDigitsConstant1e4(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e4) }
+func BenchmarkEncodeDigitsConstant1e5(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e5) }
+func BenchmarkEncodeDigitsConstant1e6(b *testing.B) { benchmarkEncoder(b, digits, constant, 1e6) }
+func BenchmarkEncodeDigitsSpeed1e4(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e4) }
+func BenchmarkEncodeDigitsSpeed1e5(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e5) }
+func BenchmarkEncodeDigitsSpeed1e6(b *testing.B) { benchmarkEncoder(b, digits, speed, 1e6) }
+func BenchmarkEncodeDigitsDefault1e4(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e4) }
+func BenchmarkEncodeDigitsDefault1e5(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e5) }
+func BenchmarkEncodeDigitsDefault1e6(b *testing.B) { benchmarkEncoder(b, digits, default_, 1e6) }
+func BenchmarkEncodeDigitsCompress1e4(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e4) }
+func BenchmarkEncodeDigitsCompress1e5(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e5) }
+func BenchmarkEncodeDigitsCompress1e6(b *testing.B) { benchmarkEncoder(b, digits, compress, 1e6) }
+func BenchmarkEncodeTwainConstant1e4(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e4) }
+func BenchmarkEncodeTwainConstant1e5(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e5) }
+func BenchmarkEncodeTwainConstant1e6(b *testing.B) { benchmarkEncoder(b, twain, constant, 1e6) }
+func BenchmarkEncodeTwainSpeed1e4(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e4) }
+func BenchmarkEncodeTwainSpeed1e5(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e5) }
+func BenchmarkEncodeTwainSpeed1e6(b *testing.B) { benchmarkEncoder(b, twain, speed, 1e6) }
+func BenchmarkEncodeTwainDefault1e4(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e4) }
+func BenchmarkEncodeTwainDefault1e5(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e5) }
+func BenchmarkEncodeTwainDefault1e6(b *testing.B) { benchmarkEncoder(b, twain, default_, 1e6) }
+func BenchmarkEncodeTwainCompress1e4(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e4) }
+func BenchmarkEncodeTwainCompress1e5(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e5) }
+func BenchmarkEncodeTwainCompress1e6(b *testing.B) { benchmarkEncoder(b, twain, compress, 1e6) }
+
+// A writer that fails after N writes.
+type errorWriter struct {
+	N int
+}
+
+func (e *errorWriter) Write(b []byte) (int, error) {
+	if e.N <= 0 {
+		return 0, io.ErrClosedPipe
+	}
+	e.N--
+	return len(b), nil
+}
+
+// Test if errors from the underlying writer are passed upwards.
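+// (errorWriter above succeeds for its first N writes and then keeps
+// returning io.ErrClosedPipe, which lets the test break the underlying
+// stream at a chosen point mid-compression.)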
+func TestWriteError(t *testing.T) {
+	buf := new(bytes.Buffer)
+	n := 65536
+	if !testing.Short() {
+		n *= 4
+	}
+	for i := 0; i < n; i++ {
+		fmt.Fprintf(buf, "asdasfasf%d%dfghfgujyut%dyutyu\n", i, i, i)
+	}
+	in := buf.Bytes()
+	// We create our own buffer to control number of writes.
+	copyBuf := make([]byte, 128)
+	for l := 0; l < 10; l++ {
+		for fail := 1; fail <= 256; fail *= 2 {
+			// Fail after 'fail' writes
+			ew := &errorWriter{N: fail}
+			w, err := NewWriter(ew, l)
+			if err != nil {
+				t.Fatalf("NewWriter: level %d: %v", l, err)
+			}
+			_, err = copyBuffer(w, bytes.NewBuffer(in), copyBuf)
+			if err == nil {
+				t.Fatalf("Level %d: Expected an error, writer was %#v", l, ew)
+			}
+			n2, err := w.Write([]byte{1, 2, 2, 3, 4, 5})
+			if n2 != 0 {
+				t.Fatal("Level", l, "Expected 0 length write, got", n2)
+			}
+			if err == nil {
+				t.Fatal("Level", l, "Expected an error")
+			}
+			err = w.Flush()
+			if err == nil {
+				t.Fatal("Level", l, "Expected an error on flush")
+			}
+			err = w.Close()
+			if err == nil {
+				t.Fatal("Level", l, "Expected an error on close")
+			}
+
+			w.Reset(ioutil.Discard)
+			n2, err = w.Write([]byte{1, 2, 3, 4, 5, 6})
+			if err != nil {
+				t.Fatal("Level", l, "Got unexpected error after reset:", err)
+			}
+			if n2 == 0 {
+				t.Fatal("Level", l, "Got 0 length write, expected > 0")
+			}
+			if testing.Short() {
+				return
+			}
+		}
+	}
+}
+
+func TestDeterministicL1(t *testing.T) { testDeterministic(1, t) }
+func TestDeterministicL2(t *testing.T) { testDeterministic(2, t) }
+func TestDeterministicL3(t *testing.T) { testDeterministic(3, t) }
+func TestDeterministicL4(t *testing.T) { testDeterministic(4, t) }
+func TestDeterministicL5(t *testing.T) { testDeterministic(5, t) }
+func TestDeterministicL6(t *testing.T) { testDeterministic(6, t) }
+func TestDeterministicL7(t *testing.T) { testDeterministic(7, t) }
+func TestDeterministicL8(t *testing.T) { testDeterministic(8, t) }
+func TestDeterministicL9(t *testing.T) { testDeterministic(9, t) }
+func TestDeterministicL0(t *testing.T) { testDeterministic(0, t) }
+func TestDeterministicLM2(t *testing.T) { testDeterministic(-2, t) }
+
+func testDeterministic(i int, t *testing.T) {
+	// Test enough data that we cross a good number of block boundaries.
+	var length = maxStoreBlockSize*30 + 500
+	if testing.Short() {
+		length /= 10
+	}
+
+	// Create a random, but compressible stream.
+	rng := rand.New(rand.NewSource(1))
+	t1 := make([]byte, length)
+	for i := range t1 {
+		t1[i] = byte(rng.Int63() & 7)
+	}
+
+	// Do our first encode.
+	var b1 bytes.Buffer
+	br := bytes.NewBuffer(t1)
+	w, err := NewWriter(&b1, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	// Use a very small prime-sized buffer.
+	cbuf := make([]byte, 787)
+	_, err = copyBuffer(w, br, cbuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	w.Close()
+
+	// We choose a different buffer size,
+	// bigger than a maximum block, and also a prime.
+	var b2 bytes.Buffer
+	cbuf = make([]byte, 81761)
+	br2 := bytes.NewBuffer(t1)
+	w2, err := NewWriter(&b2, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = copyBuffer(w2, br2, cbuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	w2.Close()
+
+	b1b := b1.Bytes()
+	b2b := b2.Bytes()
+
+	if !bytes.Equal(b1b, b2b) {
+		t.Errorf("level %d did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b))
+	}
+
+	// Test using io.WriterTo interface.
+ var b3 bytes.Buffer + br = bytes.NewBuffer(t1) + w, err = NewWriter(&b3, i) + if err != nil { + t.Fatal(err) + } + _, err = br.WriteTo(w) + if err != nil { + t.Fatal(err) + } + w.Close() + + b3b := b3.Bytes() + if !bytes.Equal(b1b, b3b) { + t.Errorf("level %d (io.WriterTo) did not produce deterministic result, result mismatch, len(a) = %d, len(b) = %d", i, len(b1b), len(b3b)) + } +} + +// copyBuffer is a copy of io.CopyBuffer, since we want to support older go versions. +// This is modified to never use io.WriterTo or io.ReaderFrom interfaces. +func copyBuffer(dst io.Writer, src io.Reader, buf []byte) (written int64, err error) { + if buf == nil { + buf = make([]byte, 32*1024) + } + for { + nr, er := src.Read(buf) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er == io.EOF { + break + } + if er != nil { + err = er + break + } + } + return written, err +} diff --git a/vendor/github.com/klauspost/compress/gzip/example_test.go b/vendor/github.com/klauspost/compress/gzip/example_test.go new file mode 100644 index 0000000000..e32346bb95 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/example_test.go @@ -0,0 +1,128 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip_test + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "log" + "os" + "time" +) + +func Example_writerReader() { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + + // Setting the Header fields is optional. + zw.Name = "a-new-hope.txt" + zw.Comment = "an epic space opera by George Lucas" + zw.ModTime = time.Date(1977, time.May, 25, 0, 0, 0, 0, time.UTC) + + _, err := zw.Write([]byte("A long time ago in a galaxy far, far away...")) + if err != nil { + log.Fatal(err) + } + + if err := zw.Close(); err != nil { + log.Fatal(err) + } + + zr, err := gzip.NewReader(&buf) + if err != nil { + log.Fatal(err) + } + + fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC()) + + if _, err := io.Copy(os.Stdout, zr); err != nil { + log.Fatal(err) + } + + if err := zr.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // Name: a-new-hope.txt + // Comment: an epic space opera by George Lucas + // ModTime: 1977-05-25 00:00:00 +0000 UTC + // + // A long time ago in a galaxy far, far away... 
+} + +func ExampleReader_Multistream() { + var buf bytes.Buffer + zw := gzip.NewWriter(&buf) + + var files = []struct { + name string + comment string + modTime time.Time + data string + }{ + {"file-1.txt", "file-header-1", time.Date(2006, time.February, 1, 3, 4, 5, 0, time.UTC), "Hello Gophers - 1"}, + {"file-2.txt", "file-header-2", time.Date(2007, time.March, 2, 4, 5, 6, 1, time.UTC), "Hello Gophers - 2"}, + } + + for _, file := range files { + zw.Name = file.name + zw.Comment = file.comment + zw.ModTime = file.modTime + + if _, err := zw.Write([]byte(file.data)); err != nil { + log.Fatal(err) + } + + if err := zw.Close(); err != nil { + log.Fatal(err) + } + + zw.Reset(&buf) + } + + zr, err := gzip.NewReader(&buf) + if err != nil { + log.Fatal(err) + } + + for { + zr.Multistream(false) + fmt.Printf("Name: %s\nComment: %s\nModTime: %s\n\n", zr.Name, zr.Comment, zr.ModTime.UTC()) + + if _, err := io.Copy(os.Stdout, zr); err != nil { + log.Fatal(err) + } + + fmt.Println("\n") + + err = zr.Reset(&buf) + if err == io.EOF { + break + } + if err != nil { + log.Fatal(err) + } + } + + if err := zr.Close(); err != nil { + log.Fatal(err) + } + + // Output: + // Name: file-1.txt + // Comment: file-header-1 + // ModTime: 2006-02-01 03:04:05 +0000 UTC + // + // Hello Gophers - 1 + // + // Name: file-2.txt + // Comment: file-header-2 + // ModTime: 2007-03-02 04:05:06 +0000 UTC + // + // Hello Gophers - 2 +} diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip_test.go b/vendor/github.com/klauspost/compress/gzip/gunzip_test.go new file mode 100644 index 0000000000..c200ab1631 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gunzip_test.go @@ -0,0 +1,682 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gzip + +import ( + "bytes" + oldgz "compress/gzip" + "crypto/rand" + "io" + "io/ioutil" + "os" + "strings" + "testing" + "time" + + "github.com/klauspost/compress/flate" +) + +type gunzipTest struct { + name string + desc string + raw string + gzip []byte + err error +} + +var gunzipTests = []gunzipTest{ + { // has 1 empty fixed-huffman block + "empty.txt", + "empty.txt", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xf7, 0x5e, 0x14, 0x4a, + 0x00, 0x03, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0x03, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { + "", + "empty - with no file name", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, + 0x00, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block + "hello.txt", + "hello.txt", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // concatenation + "hello.txt", + "hello.txt x2", + "hello world\n" + + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, + }, + nil, + }, + { // has a fixed huffman block with some length-distance pairs + "shesells.txt", + "shesells.txt", + "she sells seashells by the seashore\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0x72, 0x66, 0x8b, 0x4a, + 0x00, 0x03, 0x73, 0x68, 0x65, 0x73, 0x65, 0x6c, + 0x6c, 0x73, 0x2e, 0x74, 0x78, 0x74, 0x00, 0x2b, + 0xce, 0x48, 0x55, 0x28, 0x4e, 0xcd, 0xc9, 0x29, + 0x06, 0x92, 0x89, 0xc5, 0x19, 0x60, 0x56, 0x52, + 0xa5, 0x42, 0x09, 0x58, 0x18, 0x28, 0x90, 0x5f, + 0x94, 0xca, 0x05, 0x00, 0x76, 0xb0, 0x3b, 0xeb, + 0x24, 0x00, 0x00, 0x00, + }, + nil, + }, + { // has dynamic huffman blocks + "gettysburg", + "gettysburg", + " Four score and seven years ago our fathers brought forth on\n" + + "this continent, a new nation, conceived in Liberty, and dedicated\n" + + "to the proposition that all men are created equal.\n" + + " Now we are engaged in a great Civil War, testing whether that\n" + + "nation, or any nation so conceived and so dedicated, can long\n" + + "endure.\n" + + " We are met on a great battle-field of that war.\n" + + " We have come to dedicate a portion of that field, as a final\n" + + "resting place for those who here gave their lives that that\n" + + "nation might live. 
It is altogether fitting and proper that\n" + + "we should do this.\n" + + " But, in a larger sense, we can not dedicate — we can not\n" + + "consecrate — we can not hallow — this ground.\n" + + " The brave men, living and dead, who struggled here, have\n" + + "consecrated it, far above our poor power to add or detract.\n" + + "The world will little note, nor long remember what we say here,\n" + + "but it can never forget what they did here.\n" + + " It is for us the living, rather, to be dedicated here to the\n" + + "unfinished work which they who fought here have thus far so\n" + + "nobly advanced. It is rather for us to be here dedicated to\n" + + "the great task remaining before us — that from these honored\n" + + "dead we take increased devotion to that cause for which they\n" + + "gave the last full measure of devotion —\n" + + " that we here highly resolve that these dead shall not have\n" + + "died in vain — that this nation, under God, shall have a new\n" + + "birth of freedom — and that government of the people, by the\n" + + "people, for the people, shall not perish from this earth.\n" + + "\n" + + "Abraham Lincoln, November 19, 1863, Gettysburg, Pennsylvania\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xd1, 0x12, 0x2b, 0x4a, + 0x00, 0x03, 0x67, 0x65, 0x74, 0x74, 0x79, 0x73, + 0x62, 0x75, 0x72, 0x67, 0x00, 0x65, 0x54, 0xcd, + 0x6e, 0xd4, 0x30, 0x10, 0xbe, 0xfb, 0x29, 0xe6, + 0x01, 0x42, 0xa5, 0x0a, 0x09, 0xc1, 0x11, 0x90, + 0x40, 0x48, 0xa8, 0xe2, 0x80, 0xd4, 0xf3, 0x24, + 0x9e, 0x24, 0x56, 0xbd, 0x9e, 0xc5, 0x76, 0x76, + 0x95, 0x1b, 0x0f, 0xc1, 0x13, 0xf2, 0x24, 0x7c, + 0x63, 0x77, 0x9b, 0x4a, 0x5c, 0xaa, 0x6e, 0x6c, + 0xcf, 0x7c, 0x7f, 0x33, 0x44, 0x5f, 0x74, 0xcb, + 0x54, 0x26, 0xcd, 0x42, 0x9c, 0x3c, 0x15, 0xb9, + 0x48, 0xa2, 0x5d, 0x38, 0x17, 0xe2, 0x45, 0xc9, + 0x4e, 0x67, 0xae, 0xab, 0xe0, 0xf7, 0x98, 0x75, + 0x5b, 0xd6, 0x4a, 0xb3, 0xe6, 0xba, 0x92, 0x26, + 0x57, 0xd7, 0x50, 0x68, 0xd2, 0x54, 0x43, 0x92, + 0x54, 0x07, 0x62, 0x4a, 0x72, 0xa5, 0xc4, 0x35, + 0x68, 0x1a, 0xec, 0x60, 0x92, 0x70, 0x11, 0x4f, + 0x21, 0xd1, 0xf7, 0x30, 0x4a, 0xae, 0xfb, 0xd0, + 0x9a, 0x78, 0xf1, 0x61, 0xe2, 0x2a, 0xde, 0x55, + 0x25, 0xd4, 0xa6, 0x73, 0xd6, 0xb3, 0x96, 0x60, + 0xef, 0xf0, 0x9b, 0x2b, 0x71, 0x8c, 0x74, 0x02, + 0x10, 0x06, 0xac, 0x29, 0x8b, 0xdd, 0x25, 0xf9, + 0xb5, 0x71, 0xbc, 0x73, 0x44, 0x0f, 0x7a, 0xa5, + 0xab, 0xb4, 0x33, 0x49, 0x0b, 0x2f, 0xbd, 0x03, + 0xd3, 0x62, 0x17, 0xe9, 0x73, 0xb8, 0x84, 0x48, + 0x8f, 0x9c, 0x07, 0xaa, 0x52, 0x00, 0x6d, 0xa1, + 0xeb, 0x2a, 0xc6, 0xa0, 0x95, 0x76, 0x37, 0x78, + 0x9a, 0x81, 0x65, 0x7f, 0x46, 0x4b, 0x45, 0x5f, + 0xe1, 0x6d, 0x42, 0xe8, 0x01, 0x13, 0x5c, 0x38, + 0x51, 0xd4, 0xb4, 0x38, 0x49, 0x7e, 0xcb, 0x62, + 0x28, 0x1e, 0x3b, 0x82, 0x93, 0x54, 0x48, 0xf1, + 0xd2, 0x7d, 0xe4, 0x5a, 0xa3, 0xbc, 0x99, 0x83, + 0x44, 0x4f, 0x3a, 0x77, 0x36, 0x57, 0xce, 0xcf, + 0x2f, 0x56, 0xbe, 0x80, 0x90, 0x9e, 0x84, 0xea, + 0x51, 0x1f, 0x8f, 0xcf, 0x90, 0xd4, 0x60, 0xdc, + 0x5e, 0xb4, 0xf7, 0x10, 0x0b, 0x26, 0xe0, 0xff, + 0xc4, 0xd1, 0xe5, 0x67, 0x2e, 0xe7, 0xc8, 0x93, + 0x98, 0x05, 0xb8, 0xa8, 0x45, 0xc0, 0x4d, 0x09, + 0xdc, 0x84, 0x16, 0x2b, 0x0d, 0x9a, 0x21, 0x53, + 0x04, 0x8b, 0xd2, 0x0b, 0xbd, 0xa2, 0x4c, 0xa7, + 0x60, 0xee, 0xd9, 0xe1, 0x1d, 0xd1, 0xb7, 0x4a, + 0x30, 0x8f, 0x63, 0xd5, 0xa5, 0x8b, 0x33, 0x87, + 0xda, 0x1a, 0x18, 0x79, 0xf3, 0xe3, 0xa6, 0x17, + 0x94, 0x2e, 0xab, 0x6e, 0xa0, 0xe3, 0xcd, 0xac, + 0x50, 0x8c, 0xca, 0xa7, 0x0d, 0x76, 0x37, 0xd1, + 0x23, 0xe7, 0x05, 0x57, 0x8b, 0xa4, 0x22, 0x83, + 0xd9, 0x62, 0x52, 0x25, 0xad, 0x07, 0xbb, 0xbf, + 
0xbf, 0xff, 0xbc, 0xfa, 0xee, 0x20, 0x73, 0x91, + 0x29, 0xff, 0x7f, 0x02, 0x71, 0x62, 0x84, 0xb5, + 0xf6, 0xb5, 0x25, 0x6b, 0x41, 0xde, 0x92, 0xb7, + 0x76, 0x3f, 0x91, 0x91, 0x31, 0x1b, 0x41, 0x84, + 0x62, 0x30, 0x0a, 0x37, 0xa4, 0x5e, 0x18, 0x3a, + 0x99, 0x08, 0xa5, 0xe6, 0x6d, 0x59, 0x22, 0xec, + 0x33, 0x39, 0x86, 0x26, 0xf5, 0xab, 0x66, 0xc8, + 0x08, 0x20, 0xcf, 0x0c, 0xd7, 0x47, 0x45, 0x21, + 0x0b, 0xf6, 0x59, 0xd5, 0xfe, 0x5c, 0x8d, 0xaa, + 0x12, 0x7b, 0x6f, 0xa1, 0xf0, 0x52, 0x33, 0x4f, + 0xf5, 0xce, 0x59, 0xd3, 0xab, 0x66, 0x10, 0xbf, + 0x06, 0xc4, 0x31, 0x06, 0x73, 0xd6, 0x80, 0xa2, + 0x78, 0xc2, 0x45, 0xcb, 0x03, 0x65, 0x39, 0xc9, + 0x09, 0xd1, 0x06, 0x04, 0x33, 0x1a, 0x5a, 0xf1, + 0xde, 0x01, 0xb8, 0x71, 0x83, 0xc4, 0xb5, 0xb3, + 0xc3, 0x54, 0x65, 0x33, 0x0d, 0x5a, 0xf7, 0x9b, + 0x90, 0x7c, 0x27, 0x1f, 0x3a, 0x58, 0xa3, 0xd8, + 0xfd, 0x30, 0x5f, 0xb7, 0xd2, 0x66, 0xa2, 0x93, + 0x1c, 0x28, 0xb7, 0xe9, 0x1b, 0x0c, 0xe1, 0x28, + 0x47, 0x26, 0xbb, 0xe9, 0x7d, 0x7e, 0xdc, 0x96, + 0x10, 0x92, 0x50, 0x56, 0x7c, 0x06, 0xe2, 0x27, + 0xb4, 0x08, 0xd3, 0xda, 0x7b, 0x98, 0x34, 0x73, + 0x9f, 0xdb, 0xf6, 0x62, 0xed, 0x31, 0x41, 0x13, + 0xd3, 0xa2, 0xa8, 0x4b, 0x3a, 0xc6, 0x1d, 0xe4, + 0x2f, 0x8c, 0xf8, 0xfb, 0x97, 0x64, 0xf4, 0xb6, + 0x2f, 0x80, 0x5a, 0xf3, 0x56, 0xe0, 0x40, 0x50, + 0xd5, 0x19, 0xd0, 0x1e, 0xfc, 0xca, 0xe5, 0xc9, + 0xd4, 0x60, 0x00, 0x81, 0x2e, 0xa3, 0xcc, 0xb6, + 0x52, 0xf0, 0xb4, 0xdb, 0x69, 0x99, 0xce, 0x7a, + 0x32, 0x4c, 0x08, 0xed, 0xaa, 0x10, 0x10, 0xe3, + 0x6f, 0xee, 0x99, 0x68, 0x95, 0x9f, 0x04, 0x71, + 0xb2, 0x49, 0x2f, 0x62, 0xa6, 0x5e, 0xb4, 0xef, + 0x02, 0xed, 0x4f, 0x27, 0xde, 0x4a, 0x0f, 0xfd, + 0xc1, 0xcc, 0xdd, 0x02, 0x8f, 0x08, 0x16, 0x54, + 0xdf, 0xda, 0xca, 0xe0, 0x82, 0xf1, 0xb4, 0x31, + 0x7a, 0xa9, 0x81, 0xfe, 0x90, 0xb7, 0x3e, 0xdb, + 0xd3, 0x35, 0xc0, 0x20, 0x80, 0x33, 0x46, 0x4a, + 0x63, 0xab, 0xd1, 0x0d, 0x29, 0xd2, 0xe2, 0x84, + 0xb8, 0xdb, 0xfa, 0xe9, 0x89, 0x44, 0x86, 0x7c, + 0xe8, 0x0b, 0xe6, 0x02, 0x6a, 0x07, 0x9b, 0x96, + 0xd0, 0xdb, 0x2e, 0x41, 0x4c, 0xa1, 0xd5, 0x57, + 0x45, 0x14, 0xfb, 0xe3, 0xa6, 0x72, 0x5b, 0x87, + 0x6e, 0x0c, 0x6d, 0x5b, 0xce, 0xe0, 0x2f, 0xe2, + 0x21, 0x81, 0x95, 0xb0, 0xe8, 0xb6, 0x32, 0x0b, + 0xb2, 0x98, 0x13, 0x52, 0x5d, 0xfb, 0xec, 0x63, + 0x17, 0x8a, 0x9e, 0x23, 0x22, 0x36, 0xee, 0xcd, + 0xda, 0xdb, 0xcf, 0x3e, 0xf1, 0xc7, 0xf1, 0x01, + 0x12, 0x93, 0x0a, 0xeb, 0x6f, 0xf2, 0x02, 0x15, + 0x96, 0x77, 0x5d, 0xef, 0x9c, 0xfb, 0x88, 0x91, + 0x59, 0xf9, 0x84, 0xdd, 0x9b, 0x26, 0x8d, 0x80, + 0xf9, 0x80, 0x66, 0x2d, 0xac, 0xf7, 0x1f, 0x06, + 0xba, 0x7f, 0xff, 0xee, 0xed, 0x40, 0x5f, 0xa5, + 0xd6, 0xbd, 0x8c, 0x5b, 0x46, 0xd2, 0x7e, 0x48, + 0x4a, 0x65, 0x8f, 0x08, 0x42, 0x60, 0xf7, 0x0f, + 0xb9, 0x16, 0x0b, 0x0c, 0x1a, 0x06, 0x00, 0x00, + }, + nil, + }, + { // has 1 non-empty fixed huffman block then garbage + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, 'g', 'a', 'r', 'b', 'a', 'g', 'e', '!', '!', '!', + }, + ErrHeader, + }, + { // has 1 non-empty fixed huffman block not enough header + "hello.txt", + "hello.txt + garbage", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 
0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0x0c, 0x00, + 0x00, 0x00, gzipID1, + }, + io.ErrUnexpectedEOF, + }, + { // has 1 non-empty fixed huffman block but corrupt checksum + "hello.txt", + "hello.txt + corrupt checksum", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { // has 1 non-empty fixed huffman block but corrupt size + "hello.txt", + "hello.txt + corrupt size", + "hello world\n", + []byte{ + 0x1f, 0x8b, 0x08, 0x08, 0xc8, 0x58, 0x13, 0x4a, + 0x00, 0x03, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x2e, + 0x74, 0x78, 0x74, 0x00, 0xcb, 0x48, 0xcd, 0xc9, + 0xc9, 0x57, 0x28, 0xcf, 0x2f, 0xca, 0x49, 0xe1, + 0x02, 0x00, 0x2d, 0x3b, 0x08, 0xaf, 0xff, 0x00, + 0x00, 0x00, + }, + ErrChecksum, + }, + { + "f1l3n4m3.tXt", + "header with all fields used", + "", + []byte{ + 0x1f, 0x8b, 0x08, 0x1e, 0x70, 0xf0, 0xf9, 0x4a, + 0x00, 0xaa, 0x09, 0x00, 0x7a, 0x7a, 0x05, 0x00, + 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x31, 0x6c, + 0x33, 0x6e, 0x34, 0x6d, 0x33, 0x2e, 0x74, 0x58, + 0x74, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, + 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, + 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, + 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, + 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, + 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, + 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, + 0x3f, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, + 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, + 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, + 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, + 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, + 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, + 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, + 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, + 0x7f, 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, + 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, + 0x8f, 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, + 0x9f, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, + 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, + 0xaf, 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, + 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, + 0xbf, 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, + 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, + 0xcf, 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, + 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, + 0xdf, 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, + 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, + 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, + 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, + 0xff, 0x00, 0x92, 0xfd, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, + }, + nil, + }, + { + "", + "truncated gzip file amid raw-block", + "hello", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0x00, 0x0c, 0x00, 0xf3, 0xff, 0x68, 0x65, 0x6c, 0x6c, 0x6f, + }, + io.ErrUnexpectedEOF, + }, + { + "", + "truncated gzip file amid fixed-block", + "He", + []byte{ + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, + 0xf2, 0x48, 0xcd, + }, + io.ErrUnexpectedEOF, + }, +} + +func TestDecompressor(t *testing.T) { + b := new(bytes.Buffer) + for _, tt := range gunzipTests { + in := bytes.NewReader(tt.gzip) + gzip, err := NewReader(in) + if err != 
nil { + t.Errorf("%s: NewReader: %s", tt.name, err) + continue + } + defer gzip.Close() + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err := io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s := b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + + // Test Reader Reset. + in = bytes.NewReader(tt.gzip) + err = gzip.Reset(in) + if err != nil { + t.Errorf("%s: Reset: %s", tt.name, err) + continue + } + if tt.name != gzip.Name { + t.Errorf("%s: got name %s", tt.name, gzip.Name) + } + b.Reset() + n, err = io.Copy(b, gzip) + if err != tt.err { + t.Errorf("%s: io.Copy: %v want %v", tt.name, err, tt.err) + } + s = b.String() + if s != tt.raw { + t.Errorf("%s: got %d-byte %q want %d-byte %q", tt.name, n, s, len(tt.raw), tt.raw) + } + } +} + +func TestIssue6550(t *testing.T) { + f, err := os.Open("testdata/issue6550.gz") + if err != nil { + t.Fatal(err) + } + gzip, err := NewReader(f) + if err != nil { + t.Fatalf("NewReader(testdata/issue6550.gz): %v", err) + } + defer gzip.Close() + done := make(chan bool, 1) + go func() { + _, err := io.Copy(ioutil.Discard, gzip) + if err == nil { + t.Errorf("Copy succeeded") + } else { + t.Logf("Copy failed (correctly): %v", err) + } + done <- true + }() + select { + case <-time.After(1 * time.Second): + t.Errorf("Copy hung") + case <-done: + // ok + } +} + +func TestInitialReset(t *testing.T) { + var r Reader + if err := r.Reset(bytes.NewReader(gunzipTests[1].gzip)); err != nil { + t.Error(err) + } + var buf bytes.Buffer + if _, err := io.Copy(&buf, &r); err != nil { + t.Error(err) + } + if s := buf.String(); s != gunzipTests[1].raw { + t.Errorf("got %q want %q", s, gunzipTests[1].raw) + } +} + +func TestMultistreamFalse(t *testing.T) { + // Find concatenation test. + var tt gunzipTest + for _, tt = range gunzipTests { + if strings.HasSuffix(tt.desc, " x2") { + goto Found + } + } + t.Fatal("cannot find hello.txt x2 in gunzip tests") + +Found: + br := bytes.NewReader(tt.gzip) + var r Reader + if err := r.Reset(br); err != nil { + t.Fatalf("first reset: %v", err) + } + + // Expect two streams with "hello world\n", then real EOF. + const hello = "hello world\n" + + r.Multistream(false) + data, err := ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("first stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != nil { + t.Fatalf("second reset: %v", err) + } + r.Multistream(false) + data, err = ioutil.ReadAll(&r) + if string(data) != hello || err != nil { + t.Fatalf("second stream = %q, %v, want %q, %v", string(data), err, hello, nil) + } + + if err := r.Reset(br); err != io.EOF { + t.Fatalf("third reset: err=%v, want io.EOF", err) + } +} + +func TestWriteTo(t *testing.T) { + input := make([]byte, 100000) + n, err := rand.Read(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + compressed := &bytes.Buffer{} + // Do it twice to test MultiStream functionality + for i := 0; i < 2; i++ { + w, err := NewWriterLevel(compressed, -2) + if err != nil { + t.Fatal(err) + } + n, err = w.Write(input) + if err != nil { + t.Fatal(err) + } + if n != len(input) { + t.Fatal("did not fill buffer") + } + w.Close() + } + input = append(input, input...) 
+	buf := compressed.Bytes()
+
+	dec, err := NewReader(bytes.NewBuffer(buf))
+	if err != nil {
+		t.Fatal(err)
+	}
+	// ReadAll does not use WriteTo, but we wrap it in a NopCloser to be sure.
+	readall, err := ioutil.ReadAll(ioutil.NopCloser(dec))
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(readall) != len(input) {
+		t.Errorf("did not decompress everything, want %d, got %d", len(input), len(readall))
+	}
+	if !bytes.Equal(readall, input) {
+		t.Error("output did not match input")
+	}
+
+	dec, err = NewReader(bytes.NewBuffer(buf))
+	if err != nil {
+		t.Fatal(err)
+	}
+	wtbuf := &bytes.Buffer{}
+	written, err := dec.WriteTo(wtbuf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if written != int64(len(input)) {
+		t.Error("Returned length did not match, expected", len(input), "got", written)
+	}
+	if wtbuf.Len() != len(input) {
+		t.Error("Actual length did not match, expected", len(input), "got", wtbuf.Len())
+	}
+	if !bytes.Equal(wtbuf.Bytes(), input) {
+		t.Fatal("output did not match input")
+	}
+}
+
+func TestNilStream(t *testing.T) {
+	// Go liberally interprets RFC 1952 section 2.2 to mean that a gzip file
+	// consists of zero or more members. Thus, we test that a nil stream is okay.
+	_, err := NewReader(bytes.NewReader(nil))
+	if err != io.EOF {
+		t.Fatalf("NewReader(nil) on empty stream: got %v, want io.EOF", err)
+	}
+}
+
+func TestTruncatedStreams(t *testing.T) {
+	const data = "\x1f\x8b\b\x04\x00\tn\x88\x00\xff\a\x00foo bar\xcbH\xcd\xc9\xc9\xd7Q(\xcf/\xcaI\x01\x04:r\xab\xff\f\x00\x00\x00"
+
+	// Intentionally iterate starting with at least one byte in the stream.
+	for i := 1; i < len(data)-1; i++ {
+		r, err := NewReader(strings.NewReader(data[:i]))
+		if err != nil {
+			if err != io.ErrUnexpectedEOF {
+				t.Errorf("NewReader(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
+			}
+			continue
+		}
+		_, err = io.Copy(ioutil.Discard, r)
+		if ferr, ok := err.(*flate.ReadError); ok {
+			err = ferr.Err
+		}
+		if err != io.ErrUnexpectedEOF {
+			t.Errorf("io.Copy(%d) on truncated stream: got %v, want %v", i, err, io.ErrUnexpectedEOF)
+		}
+	}
+}
+
+func BenchmarkGunzipCopy(b *testing.B) {
+	dat, _ := ioutil.ReadFile("testdata/test.json")
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dst := &bytes.Buffer{}
+	w, _ := NewWriterLevel(dst, 1)
+	_, err := w.Write(dat)
+	if err != nil {
+		b.Fatal(err)
+	}
+	w.Close()
+	input := dst.Bytes()
+	b.SetBytes(int64(len(dat)))
+	b.ResetTimer()
+	for n := 0; n < b.N; n++ {
+		r, err := NewReader(bytes.NewBuffer(input))
+		if err != nil {
+			b.Fatal(err)
+		}
+		_, err = io.Copy(ioutil.Discard, r)
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func BenchmarkGunzipNoWriteTo(b *testing.B) {
+	dat, _ := ioutil.ReadFile("testdata/test.json")
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+ dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, ioutil.NopCloser(r)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkGunzipStdlib(b *testing.B) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dst := &bytes.Buffer{} + w, _ := NewWriterLevel(dst, 1) + _, err := w.Write(dat) + if err != nil { + b.Fatal(err) + } + w.Close() + input := dst.Bytes() + r, err := oldgz.NewReader(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(dat))) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := r.Reset(bytes.NewBuffer(input)) + if err != nil { + b.Fatal(err) + } + _, err = io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/klauspost/compress/gzip/gzip_test.go b/vendor/github.com/klauspost/compress/gzip/gzip_test.go new file mode 100644 index 0000000000..b18bb54b04 --- /dev/null +++ b/vendor/github.com/klauspost/compress/gzip/gzip_test.go @@ -0,0 +1,519 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gzip + +import ( + "bufio" + "bytes" + oldgz "compress/gzip" + "io" + "io/ioutil" + "math/rand" + "testing" + "time" +) + +// TestEmpty tests that an empty payload still forms a valid GZIP stream. +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + + if err := NewWriter(buf).Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if len(b) != 0 { + t.Fatalf("got %d bytes, want 0", len(b)) + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestRoundTrip tests that gzipping and then gunzipping is the identity +// function. +func TestRoundTrip(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + if _, err := w.Write([]byte("payload")); err != nil { + t.Fatalf("Write: %v", err) + } + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + + r, err := NewReader(buf) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("ReadAll: %v", err) + } + if string(b) != "payload" { + t.Fatalf("payload is %q, want %q", string(b), "payload") + } + if r.Comment != "comment" { + t.Fatalf("comment is %q, want %q", r.Comment, "comment") + } + if string(r.Extra) != "extra" { + t.Fatalf("extra is %q, want %q", r.Extra, "extra") + } + if r.ModTime.Unix() != 1e8 { + t.Fatalf("mtime is %d, want %d", r.ModTime.Unix(), uint32(1e8)) + } + if r.Name != "name" { + t.Fatalf("name is %q, want %q", r.Name, "name") + } + if err := r.Close(); err != nil { + t.Fatalf("Reader.Close: %v", err) + } +} + +// TestLatin1 tests the internal functions for converting to and from Latin-1. 
+func TestLatin1(t *testing.T) { + latin1 := []byte{0xc4, 'u', 0xdf, 'e', 'r', 'u', 'n', 'g', 0} + utf8 := "Äußerung" + z := Reader{r: bufio.NewReader(bytes.NewReader(latin1))} + s, err := z.readString() + if err != nil { + t.Fatalf("readString: %v", err) + } + if s != utf8 { + t.Fatalf("read latin-1: got %q, want %q", s, utf8) + } + + buf := bytes.NewBuffer(make([]byte, 0, len(latin1))) + c := Writer{w: buf} + if err = c.writeString(utf8); err != nil { + t.Fatalf("writeString: %v", err) + } + s = buf.String() + if s != string(latin1) { + t.Fatalf("write utf-8: got %q, want %q", s, string(latin1)) + } +} + +// TestLatin1RoundTrip tests that metadata that is representable in Latin-1 +// survives a round trip. +func TestLatin1RoundTrip(t *testing.T) { + testCases := []struct { + name string + ok bool + }{ + {"", true}, + {"ASCII is OK", true}, + {"unless it contains a NUL\x00", false}, + {"no matter where \x00 occurs", false}, + {"\x00\x00\x00", false}, + {"Látin-1 also passes (U+00E1)", true}, + {"but LĀtin Extended-A (U+0100) does not", false}, + {"neither does 日本語", false}, + {"invalid UTF-8 also \xffails", false}, + {"\x00 as does Látin-1 with NUL", false}, + } + for _, tc := range testCases { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Name = tc.name + err := w.Close() + if (err == nil) != tc.ok { + t.Errorf("Writer.Close: name = %q, err = %v", tc.name, err) + continue + } + if !tc.ok { + continue + } + + r, err := NewReader(buf) + if err != nil { + t.Errorf("NewReader: %v", err) + continue + } + _, err = ioutil.ReadAll(r) + if err != nil { + t.Errorf("ReadAll: %v", err) + continue + } + if r.Name != tc.name { + t.Errorf("name is %q, want %q", r.Name, tc.name) + continue + } + if err := r.Close(); err != nil { + t.Errorf("Reader.Close: %v", err) + continue + } + } +} + +func TestWriterFlush(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w.Comment = "comment" + w.Extra = []byte("extra") + w.ModTime = time.Unix(1e8, 0) + w.Name = "name" + + n0 := buf.Len() + if n0 != 0 { + t.Fatalf("buffer size = %d before writes; want 0", n0) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n1 := buf.Len() + if n1 == 0 { + t.Fatal("no data after first flush") + } + + w.Write([]byte("x")) + + n2 := buf.Len() + if n1 != n2 { + t.Fatalf("after writing a single byte, size changed from %d to %d; want no change", n1, n2) + } + + if err := w.Flush(); err != nil { + t.Fatal(err) + } + + n3 := buf.Len() + if n2 == n3 { + t.Fatal("Flush didn't flush any data") + } +} + +// Multiple gzip files concatenated form a valid gzip file. 
+func TestConcat(t *testing.T) {
+	var buf bytes.Buffer
+	w := NewWriter(&buf)
+	w.Write([]byte("hello "))
+	w.Close()
+	w = NewWriter(&buf)
+	w.Write([]byte("world\n"))
+	w.Close()
+
+	r, err := NewReader(&buf)
+	if err != nil {
+		t.Fatalf("NewReader: %v", err)
+	}
+	data, err := ioutil.ReadAll(r)
+	if string(data) != "hello world\n" || err != nil {
+		t.Fatalf("ReadAll = %q, %v, want %q, nil", data, err, "hello world\n")
+	}
+}
+
+func TestWriterReset(t *testing.T) {
+	buf := new(bytes.Buffer)
+	buf2 := new(bytes.Buffer)
+	z := NewWriter(buf)
+	msg := []byte("hello world")
+	z.Write(msg)
+	z.Close()
+	z.Reset(buf2)
+	z.Write(msg)
+	z.Close()
+	if buf.String() != buf2.String() {
+		t.Errorf("buf2 %q != original buf of %q", buf2.String(), buf.String())
+	}
+}
+
+var testbuf []byte
+
+func testFile(i, level int, t *testing.T) {
+	dat, _ := ioutil.ReadFile("testdata/test.json")
+	dl := len(dat)
+	if len(testbuf) != i*dl {
+		// Make results predictable
+		testbuf = make([]byte, i*dl)
+		for j := 0; j < i; j++ {
+			copy(testbuf[j*dl:j*dl+dl], dat)
+		}
+	}
+
+	br := bytes.NewBuffer(testbuf)
+	var buf bytes.Buffer
+	w, err := NewWriterLevel(&buf, level)
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(w, br)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if int(n) != len(testbuf) {
+		t.Fatal("Short write:", n, "!=", len(testbuf))
+	}
+	err = w.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+	r, err := NewReader(&buf)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	decoded, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	if !bytes.Equal(testbuf, decoded) {
+		t.Errorf("decoded content does not match.")
+	}
+}
+
+func TestFile1xM2(t *testing.T) { testFile(1, -2, t) }
+func TestFile1xM1(t *testing.T) { testFile(1, -1, t) }
+func TestFile1x0(t *testing.T) { testFile(1, 0, t) }
+func TestFile1x1(t *testing.T) { testFile(1, 1, t) }
+func TestFile1x2(t *testing.T) { testFile(1, 2, t) }
+func TestFile1x3(t *testing.T) { testFile(1, 3, t) }
+func TestFile1x4(t *testing.T) { testFile(1, 4, t) }
+func TestFile1x5(t *testing.T) { testFile(1, 5, t) }
+func TestFile1x6(t *testing.T) { testFile(1, 6, t) }
+func TestFile1x7(t *testing.T) { testFile(1, 7, t) }
+func TestFile1x8(t *testing.T) { testFile(1, 8, t) }
+func TestFile1x9(t *testing.T) { testFile(1, 9, t) }
+func TestFile10(t *testing.T) { testFile(10, DefaultCompression, t) }
+
+func TestFile50(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping during short test")
+	}
+	testFile(50, DefaultCompression, t)
+}
+
+func TestFile200(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping during short test")
+	}
+	testFile(200, BestSpeed, t)
+}
+
+func testBigGzip(i int, t *testing.T) {
+	if len(testbuf) != i {
+		// Make results predictable
+		rand.Seed(1337)
+		testbuf = make([]byte, i)
+		for idx := range testbuf {
+			testbuf[idx] = byte(65 + rand.Intn(20))
+		}
+	}
+	c := BestCompression
+	if testing.Short() {
+		c = BestSpeed
+	}
+
+	br := bytes.NewBuffer(testbuf)
+	var buf bytes.Buffer
+	w, err := NewWriterLevel(&buf, c)
+	if err != nil {
+		t.Fatal(err)
+	}
+	n, err := io.Copy(w, br)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if int(n) != len(testbuf) {
+		t.Fatal("Short write:", n, "!=", len(testbuf))
+	}
+	err = w.Close()
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	r, err := NewReader(&buf)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	decoded, err := ioutil.ReadAll(r)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	if !bytes.Equal(testbuf, decoded) {
+		t.Errorf("decoded content does not match.")
+	}
+}
+
+func TestGzip1K(t *testing.T) { testBigGzip(1000, t) }
+func TestGzip100K(t *testing.T) { testBigGzip(100000, t) }
+func TestGzip1M(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping during short test")
+	}
+
+	testBigGzip(1000000, t)
+}
+func TestGzip10M(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping during short test")
+	}
+	testBigGzip(10000000, t)
+}
+
+// Test if two runs produce identical results.
+func TestDeterministicLM2(t *testing.T) { testDeterm(-2, t) }
+
+// Level 0 is not deterministic since it depends on the size of each write.
+// func TestDeterministicL0(t *testing.T) { testDeterm(0, t) }
+func TestDeterministicL1(t *testing.T) { testDeterm(1, t) }
+func TestDeterministicL2(t *testing.T) { testDeterm(2, t) }
+func TestDeterministicL3(t *testing.T) { testDeterm(3, t) }
+func TestDeterministicL4(t *testing.T) { testDeterm(4, t) }
+func TestDeterministicL5(t *testing.T) { testDeterm(5, t) }
+func TestDeterministicL6(t *testing.T) { testDeterm(6, t) }
+func TestDeterministicL7(t *testing.T) { testDeterm(7, t) }
+func TestDeterministicL8(t *testing.T) { testDeterm(8, t) }
+func TestDeterministicL9(t *testing.T) { testDeterm(9, t) }
+
+func testDeterm(i int, t *testing.T) {
+	var length = 500000
+	if testing.Short() {
+		length = 100000
+	}
+	rand.Seed(1337)
+	t1 := make([]byte, length)
+	for idx := range t1 {
+		t1[idx] = byte(65 + rand.Intn(8))
+	}
+
+	br := bytes.NewBuffer(t1)
+	var b1 bytes.Buffer
+	w, err := NewWriterLevel(&b1, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+	_, err = io.Copy(w, br)
+	if err != nil {
+		t.Fatal(err)
+	}
+	w.Flush()
+	w.Close()
+
+	// We recreate the buffer, so we have a good chance of getting a
+	// different memory address.
+	rand.Seed(1337)
+	t2 := make([]byte, length)
+	for idx := range t2 {
+		t2[idx] = byte(65 + rand.Intn(8))
+	}
+
+	br2 := bytes.NewBuffer(t2)
+	var b2 bytes.Buffer
+	w2, err := NewWriterLevel(&b2, i)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// We write the same data, but with a different size than
+	// the default copy.
+	for {
+		_, err = io.CopyN(w2, br2, 1234)
+		if err == io.EOF {
+			err = nil
+			break
+		} else if err != nil {
+			break
+		}
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+	w2.Flush()
+	w2.Close()
+
+	b1b := b1.Bytes()
+	b2b := b2.Bytes()
+
+	if !bytes.Equal(b1b, b2b) {
+		t.Fatalf("Level %d did not produce deterministic result, len(a) = %d, len(b) = %d", i, len(b1b), len(b2b))
+	}
+}
+
+func BenchmarkGzipLM2(b *testing.B) { benchmarkGzipN(b, -2) }
+func BenchmarkGzipL1(b *testing.B) { benchmarkGzipN(b, 1) }
+func BenchmarkGzipL2(b *testing.B) { benchmarkGzipN(b, 2) }
+func BenchmarkGzipL3(b *testing.B) { benchmarkGzipN(b, 3) }
+func BenchmarkGzipL4(b *testing.B) { benchmarkGzipN(b, 4) }
+func BenchmarkGzipL5(b *testing.B) { benchmarkGzipN(b, 5) }
+func BenchmarkGzipL6(b *testing.B) { benchmarkGzipN(b, 6) }
+func BenchmarkGzipL7(b *testing.B) { benchmarkGzipN(b, 7) }
+func BenchmarkGzipL8(b *testing.B) { benchmarkGzipN(b, 8) }
+func BenchmarkGzipL9(b *testing.B) { benchmarkGzipN(b, 9) }
+
+func benchmarkGzipN(b *testing.B, level int) {
+	dat, _ := ioutil.ReadFile("testdata/test.json")
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+	dat = append(dat, dat...)
+ b.SetBytes(int64(len(dat))) + w, _ := NewWriterLevel(ioutil.Discard, level) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w.Reset(ioutil.Discard) + n, err := w.Write(dat) + if n != len(dat) { + panic("short write") + } + if err != nil { + panic(err) + } + err = w.Close() + if err != nil { + panic(err) + } + } +} + +func BenchmarkOldGzipL1(b *testing.B) { benchmarkOldGzipN(b, 1) } +func BenchmarkOldGzipL2(b *testing.B) { benchmarkOldGzipN(b, 2) } +func BenchmarkOldGzipL3(b *testing.B) { benchmarkOldGzipN(b, 3) } +func BenchmarkOldGzipL4(b *testing.B) { benchmarkOldGzipN(b, 4) } +func BenchmarkOldGzipL5(b *testing.B) { benchmarkOldGzipN(b, 5) } +func BenchmarkOldGzipL6(b *testing.B) { benchmarkOldGzipN(b, 6) } +func BenchmarkOldGzipL7(b *testing.B) { benchmarkOldGzipN(b, 7) } +func BenchmarkOldGzipL8(b *testing.B) { benchmarkOldGzipN(b, 8) } +func BenchmarkOldGzipL9(b *testing.B) { benchmarkOldGzipN(b, 9) } + +func benchmarkOldGzipN(b *testing.B, level int) { + dat, _ := ioutil.ReadFile("testdata/test.json") + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + dat = append(dat, dat...) + + b.SetBytes(int64(len(dat))) + w, _ := oldgz.NewWriterLevel(ioutil.Discard, level) + b.ResetTimer() + for n := 0; n < b.N; n++ { + w.Reset(ioutil.Discard) + n, err := w.Write(dat) + if n != len(dat) { + panic("short write") + } + if err != nil { + panic(err) + } + err = w.Close() + if err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/klauspost/cpuid/.gitignore b/vendor/github.com/klauspost/cpuid/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/cpuid/.travis.yml b/vendor/github.com/klauspost/cpuid/.travis.yml new file mode 100644 index 0000000000..bde823d8ab --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/vendor/github.com/klauspost/cpuid/cpuid_test.go b/vendor/github.com/klauspost/cpuid/cpuid_test.go new file mode 100644 index 0000000000..54d2cbc519 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/cpuid_test.go @@ -0,0 +1,727 @@ +// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. + +package cpuid + +import ( + "fmt" + "testing" +) + +// There is no real way to test a CPU identifier, since results will +// obviously differ on each machine. 
+func TestCPUID(t *testing.T) { + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("Name:", CPU.BrandName) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + + if CPU.SSE2() { + t.Log("We have SSE2") + } +} + +func TestDumpCPUID(t *testing.T) { + n := int(maxFunctionID()) + for i := 0; i <= n; i++ { + a, b, c, d := cpuidex(uint32(i), 0) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + ex := uint32(1) + for { + a2, b2, c2, d2 := cpuidex(uint32(i), ex) + if a2 == a && b2 == b && d2 == d || ex > 50 || a2 == 0 { + break + } + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a2, b2, c2, d2) + a, b, c, d = a2, b2, c2, d2 + ex++ + } + } + n2 := maxExtendedFunction() + for i := uint32(0x80000000); i <= n2; i++ { + a, b, c, d := cpuid(i) + t.Logf("CPUID %08x: %08x-%08x-%08x-%08x", i, a, b, c, d) + } +} + +func Example() { + // Print basic CPU information: + fmt.Println("Name:", CPU.BrandName) + fmt.Println("PhysicalCores:", CPU.PhysicalCores) + fmt.Println("ThreadsPerCore:", CPU.ThreadsPerCore) + fmt.Println("LogicalCores:", CPU.LogicalCores) + fmt.Println("Family", CPU.Family, "Model:", CPU.Model) + fmt.Println("Features:", CPU.Features) + fmt.Println("Cacheline bytes:", CPU.CacheLine) + + // Test if we have a specific feature: + if CPU.SSE() { + fmt.Println("We have Streaming SIMD Extensions") + } +} + +func TestBrandNameZero(t *testing.T) { + if len(CPU.BrandName) > 0 { + // Cut out last byte + last := []byte(CPU.BrandName[len(CPU.BrandName)-1:]) + if last[0] == 0 { + t.Fatal("last byte was zero") + } else if last[0] == 32 { + t.Fatal("whitespace wasn't trimmed") + } + } +} + +// Generated here: http://play.golang.org/p/mko-0tFt0Q + +// TestCmov tests Cmov() function +func TestCmov(t *testing.T) { + got := CPU.Cmov() + expected := CPU.Features&CMOV == CMOV + if got != expected { + t.Fatalf("Cmov: expected %v, got %v", expected, got) + } + t.Log("CMOV Support:", got) +} + +// TestAmd3dnow tests Amd3dnow() function +func TestAmd3dnow(t *testing.T) { + got := CPU.Amd3dnow() + expected := CPU.Features&AMD3DNOW == AMD3DNOW + if got != expected { + t.Fatalf("Amd3dnow: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOW Support:", got) +} + +// TestAmd3dnowExt tests Amd3dnowExt() function +func TestAmd3dnowExt(t *testing.T) { + got := CPU.Amd3dnowExt() + expected := CPU.Features&AMD3DNOWEXT == AMD3DNOWEXT + if got != expected { + t.Fatalf("Amd3dnowExt: expected %v, got %v", expected, got) + } + t.Log("AMD3DNOWEXT Support:", got) +} + +// TestMMX tests MMX() function +func TestMMX(t *testing.T) { + got := CPU.MMX() + expected := CPU.Features&MMX == MMX + if got != expected { + t.Fatalf("MMX: expected %v, got %v", expected, got) + } + t.Log("MMX Support:", got) +} + +// TestMMXext tests MMXext() function +func TestMMXext(t *testing.T) { + got := CPU.MMXExt() + expected := CPU.Features&MMXEXT == MMXEXT + if got != expected { + t.Fatalf("MMXExt: expected %v, got %v", expected, got) + } + t.Log("MMXEXT Support:", got) +} + +// TestSSE tests SSE() function +func TestSSE(t *testing.T) { + got := CPU.SSE() + 
expected := CPU.Features&SSE == SSE + if got != expected { + t.Fatalf("SSE: expected %v, got %v", expected, got) + } + t.Log("SSE Support:", got) +} + +// TestSSE2 tests SSE2() function +func TestSSE2(t *testing.T) { + got := CPU.SSE2() + expected := CPU.Features&SSE2 == SSE2 + if got != expected { + t.Fatalf("SSE2: expected %v, got %v", expected, got) + } + t.Log("SSE2 Support:", got) +} + +// TestSSE3 tests SSE3() function +func TestSSE3(t *testing.T) { + got := CPU.SSE3() + expected := CPU.Features&SSE3 == SSE3 + if got != expected { + t.Fatalf("SSE3: expected %v, got %v", expected, got) + } + t.Log("SSE3 Support:", got) +} + +// TestSSSE3 tests SSSE3() function +func TestSSSE3(t *testing.T) { + got := CPU.SSSE3() + expected := CPU.Features&SSSE3 == SSSE3 + if got != expected { + t.Fatalf("SSSE3: expected %v, got %v", expected, got) + } + t.Log("SSSE3 Support:", got) +} + +// TestSSE4 tests SSE4() function +func TestSSE4(t *testing.T) { + got := CPU.SSE4() + expected := CPU.Features&SSE4 == SSE4 + if got != expected { + t.Fatalf("SSE4: expected %v, got %v", expected, got) + } + t.Log("SSE4 Support:", got) +} + +// TestSSE42 tests SSE42() function +func TestSSE42(t *testing.T) { + got := CPU.SSE42() + expected := CPU.Features&SSE42 == SSE42 + if got != expected { + t.Fatalf("SSE42: expected %v, got %v", expected, got) + } + t.Log("SSE42 Support:", got) +} + +// TestAVX tests AVX() function +func TestAVX(t *testing.T) { + got := CPU.AVX() + expected := CPU.Features&AVX == AVX + if got != expected { + t.Fatalf("AVX: expected %v, got %v", expected, got) + } + t.Log("AVX Support:", got) +} + +// TestAVX2 tests AVX2() function +func TestAVX2(t *testing.T) { + got := CPU.AVX2() + expected := CPU.Features&AVX2 == AVX2 + if got != expected { + t.Fatalf("AVX2: expected %v, got %v", expected, got) + } + t.Log("AVX2 Support:", got) +} + +// TestFMA3 tests FMA3() function +func TestFMA3(t *testing.T) { + got := CPU.FMA3() + expected := CPU.Features&FMA3 == FMA3 + if got != expected { + t.Fatalf("FMA3: expected %v, got %v", expected, got) + } + t.Log("FMA3 Support:", got) +} + +// TestFMA4 tests FMA4() function +func TestFMA4(t *testing.T) { + got := CPU.FMA4() + expected := CPU.Features&FMA4 == FMA4 + if got != expected { + t.Fatalf("FMA4: expected %v, got %v", expected, got) + } + t.Log("FMA4 Support:", got) +} + +// TestXOP tests XOP() function +func TestXOP(t *testing.T) { + got := CPU.XOP() + expected := CPU.Features&XOP == XOP + if got != expected { + t.Fatalf("XOP: expected %v, got %v", expected, got) + } + t.Log("XOP Support:", got) +} + +// TestF16C tests F16C() function +func TestF16C(t *testing.T) { + got := CPU.F16C() + expected := CPU.Features&F16C == F16C + if got != expected { + t.Fatalf("F16C: expected %v, got %v", expected, got) + } + t.Log("F16C Support:", got) +} + +// TestCX16 tests CX16() function +func TestCX16(t *testing.T) { + got := CPU.CX16() + expected := CPU.Features&CX16 == CX16 + if got != expected { + t.Fatalf("CX16: expected %v, got %v", expected, got) + } + t.Log("CX16 Support:", got) +} + +// TestSGX tests SGX() function +func TestSGX(t *testing.T) { + got := CPU.SGX.Available + expected := CPU.Features&SGX == SGX + if got != expected { + t.Fatalf("SGX: expected %v, got %v", expected, got) + } + t.Log("SGX Support:", got) +} + +// TestBMI1 tests BMI1() function +func TestBMI1(t *testing.T) { + got := CPU.BMI1() + expected := CPU.Features&BMI1 == BMI1 + if got != expected { + t.Fatalf("BMI1: expected %v, got %v", expected, got) + } + t.Log("BMI1 Support:", got) +} + 
+// TestBMI2 tests BMI2() function
+func TestBMI2(t *testing.T) {
+	got := CPU.BMI2()
+	expected := CPU.Features&BMI2 == BMI2
+	if got != expected {
+		t.Fatalf("BMI2: expected %v, got %v", expected, got)
+	}
+	t.Log("BMI2 Support:", got)
+}
+
+// TestTBM tests TBM() function
+func TestTBM(t *testing.T) {
+	got := CPU.TBM()
+	expected := CPU.Features&TBM == TBM
+	if got != expected {
+		t.Fatalf("TBM: expected %v, got %v", expected, got)
+	}
+	t.Log("TBM Support:", got)
+}
+
+// TestLzcnt tests Lzcnt() function
+func TestLzcnt(t *testing.T) {
+	got := CPU.Lzcnt()
+	expected := CPU.Features&LZCNT == LZCNT
+	if got != expected {
+		t.Fatalf("Lzcnt: expected %v, got %v", expected, got)
+	}
+	t.Log("LZCNT Support:", got)
+}
+
+// TestPopcnt tests Popcnt() function
+func TestPopcnt(t *testing.T) {
+	got := CPU.Popcnt()
+	expected := CPU.Features&POPCNT == POPCNT
+	if got != expected {
+		t.Fatalf("Popcnt: expected %v, got %v", expected, got)
+	}
+	t.Log("POPCNT Support:", got)
+}
+
+// TestAesNi tests AesNi() function
+func TestAesNi(t *testing.T) {
+	got := CPU.AesNi()
+	expected := CPU.Features&AESNI == AESNI
+	if got != expected {
+		t.Fatalf("AesNi: expected %v, got %v", expected, got)
+	}
+	t.Log("AESNI Support:", got)
+}
+
+// TestHTT tests HTT() function
+func TestHTT(t *testing.T) {
+	got := CPU.HTT()
+	expected := CPU.Features&HTT == HTT
+	if got != expected {
+		t.Fatalf("HTT: expected %v, got %v", expected, got)
+	}
+	t.Log("HTT Support:", got)
+}
+
+// TestClmul tests Clmul() function
+func TestClmul(t *testing.T) {
+	got := CPU.Clmul()
+	expected := CPU.Features&CLMUL == CLMUL
+	if got != expected {
+		t.Fatalf("Clmul: expected %v, got %v", expected, got)
+	}
+	t.Log("CLMUL Support:", got)
+}
+
+// TestSSE2Slow tests SSE2Slow() function
+func TestSSE2Slow(t *testing.T) {
+	got := CPU.SSE2Slow()
+	expected := CPU.Features&SSE2SLOW == SSE2SLOW
+	if got != expected {
+		t.Fatalf("SSE2Slow: expected %v, got %v", expected, got)
+	}
+	t.Log("SSE2SLOW Support:", got)
+}
+
+// TestSSE3Slow tests SSE3Slow() function
+func TestSSE3Slow(t *testing.T) {
+	got := CPU.SSE3Slow()
+	expected := CPU.Features&SSE3SLOW == SSE3SLOW
+	if got != expected {
+		t.Fatalf("SSE3Slow: expected %v, got %v", expected, got)
+	}
+	t.Log("SSE3SLOW Support:", got)
+}
+
+// TestAtom tests Atom() function
+func TestAtom(t *testing.T) {
+	got := CPU.Atom()
+	expected := CPU.Features&ATOM == ATOM
+	if got != expected {
+		t.Fatalf("Atom: expected %v, got %v", expected, got)
+	}
+	t.Log("ATOM Support:", got)
+}
+
+// TestNX tests NX() function (NX (No-Execute) bit)
+func TestNX(t *testing.T) {
+	got := CPU.NX()
+	expected := CPU.Features&NX == NX
+	if got != expected {
+		t.Fatalf("NX: expected %v, got %v", expected, got)
+	}
+	t.Log("NX Support:", got)
+}
+
+// TestSSE4A tests SSE4A() function (AMD Barcelona microarchitecture SSE4a instructions)
+func TestSSE4A(t *testing.T) {
+	got := CPU.SSE4A()
+	expected := CPU.Features&SSE4A == SSE4A
+	if got != expected {
+		t.Fatalf("SSE4A: expected %v, got %v", expected, got)
+	}
+	t.Log("SSE4A Support:", got)
+}
+
+// TestHLE tests HLE() function (Hardware Lock Elision)
+func TestHLE(t *testing.T) {
+	got := CPU.HLE()
+	expected := CPU.Features&HLE == HLE
+	if got != expected {
+		t.Fatalf("HLE: expected %v, got %v", expected, got)
+	}
+	t.Log("HLE Support:", got)
+}
+
+// TestRTM tests RTM() function (Restricted Transactional Memory)
+func TestRTM(t *testing.T) {
+	got := CPU.RTM()
+	expected := CPU.Features&RTM == RTM
+	if got != expected {
+		t.Fatalf("RTM: expected %v, got %v", expected,
got) + } + t.Log("RTM Support:", got) +} + +// TestRdrand tests RDRAND() function (RDRAND instruction is available) +func TestRdrand(t *testing.T) { + got := CPU.Rdrand() + expected := CPU.Features&RDRAND == RDRAND + if got != expected { + t.Fatalf("Rdrand: expected %v, got %v", expected, got) + } + t.Log("Rdrand Support:", got) +} + +// TestRdseed tests RDSEED() function (RDSEED instruction is available) +func TestRdseed(t *testing.T) { + got := CPU.Rdseed() + expected := CPU.Features&RDSEED == RDSEED + if got != expected { + t.Fatalf("Rdseed: expected %v, got %v", expected, got) + } + t.Log("Rdseed Support:", got) +} + +// TestADX tests ADX() function (Intel ADX (Multi-Precision Add-Carry Instruction Extensions)) +func TestADX(t *testing.T) { + got := CPU.ADX() + expected := CPU.Features&ADX == ADX + if got != expected { + t.Fatalf("ADX: expected %v, got %v", expected, got) + } + t.Log("ADX Support:", got) +} + +// TestSHA tests SHA() function (Intel SHA Extensions) +func TestSHA(t *testing.T) { + got := CPU.SHA() + expected := CPU.Features&SHA == SHA + if got != expected { + t.Fatalf("SHA: expected %v, got %v", expected, got) + } + t.Log("SHA Support:", got) +} + +// TestAVX512F tests AVX512F() function (AVX-512 Foundation) +func TestAVX512F(t *testing.T) { + got := CPU.AVX512F() + expected := CPU.Features&AVX512F == AVX512F + if got != expected { + t.Fatalf("AVX512F: expected %v, got %v", expected, got) + } + t.Log("AVX512F Support:", got) +} + +// TestAVX512DQ tests AVX512DQ() function (AVX-512 Doubleword and Quadword Instructions) +func TestAVX512DQ(t *testing.T) { + got := CPU.AVX512DQ() + expected := CPU.Features&AVX512DQ == AVX512DQ + if got != expected { + t.Fatalf("AVX512DQ: expected %v, got %v", expected, got) + } + t.Log("AVX512DQ Support:", got) +} + +// TestAVX512IFMA tests AVX512IFMA() function (AVX-512 Integer Fused Multiply-Add Instructions) +func TestAVX512IFMA(t *testing.T) { + got := CPU.AVX512IFMA() + expected := CPU.Features&AVX512IFMA == AVX512IFMA + if got != expected { + t.Fatalf("AVX512IFMA: expected %v, got %v", expected, got) + } + t.Log("AVX512IFMA Support:", got) +} + +// TestAVX512PF tests AVX512PF() function (AVX-512 Prefetch Instructions) +func TestAVX512PF(t *testing.T) { + got := CPU.AVX512PF() + expected := CPU.Features&AVX512PF == AVX512PF + if got != expected { + t.Fatalf("AVX512PF: expected %v, got %v", expected, got) + } + t.Log("AVX512PF Support:", got) +} + +// TestAVX512ER tests AVX512ER() function (AVX-512 Exponential and Reciprocal Instructions) +func TestAVX512ER(t *testing.T) { + got := CPU.AVX512ER() + expected := CPU.Features&AVX512ER == AVX512ER + if got != expected { + t.Fatalf("AVX512ER: expected %v, got %v", expected, got) + } + t.Log("AVX512ER Support:", got) +} + +// TestAVX512CD tests AVX512CD() function (AVX-512 Conflict Detection Instructions) +func TestAVX512CD(t *testing.T) { + got := CPU.AVX512CD() + expected := CPU.Features&AVX512CD == AVX512CD + if got != expected { + t.Fatalf("AVX512CD: expected %v, got %v", expected, got) + } + t.Log("AVX512CD Support:", got) +} + +// TestAVX512BW tests AVX512BW() function (AVX-512 Byte and Word Instructions) +func TestAVX512BW(t *testing.T) { + got := CPU.AVX512BW() + expected := CPU.Features&AVX512BW == AVX512BW + if got != expected { + t.Fatalf("AVX512BW: expected %v, got %v", expected, got) + } + t.Log("AVX512BW Support:", got) +} + +// TestAVX512VL tests AVX512VL() function (AVX-512 Vector Length Extensions) +func TestAVX512VL(t *testing.T) { + got := CPU.AVX512VL() + expected := 
CPU.Features&AVX512VL == AVX512VL
+	if got != expected {
+		t.Fatalf("AVX512VL: expected %v, got %v", expected, got)
+	}
+	t.Log("AVX512VL Support:", got)
+}
+
+// TestAVX512VBMI tests AVX512VBMI() function (AVX-512 Vector Bit Manipulation Instructions)
+func TestAVX512VBMI(t *testing.T) {
+	got := CPU.AVX512VBMI()
+	expected := CPU.Features&AVX512VBMI == AVX512VBMI
+	if got != expected {
+		t.Fatalf("AVX512VBMI: expected %v, got %v", expected, got)
+	}
+	t.Log("AVX512VBMI Support:", got)
+}
+
+// TestMPX tests MPX() function (Intel MPX (Memory Protection Extensions))
+func TestMPX(t *testing.T) {
+	got := CPU.MPX()
+	expected := CPU.Features&MPX == MPX
+	if got != expected {
+		t.Fatalf("MPX: expected %v, got %v", expected, got)
+	}
+	t.Log("MPX Support:", got)
+}
+
+// TestERMS tests ERMS() function (Enhanced REP MOVSB/STOSB)
+func TestERMS(t *testing.T) {
+	got := CPU.ERMS()
+	expected := CPU.Features&ERMS == ERMS
+	if got != expected {
+		t.Fatalf("ERMS: expected %v, got %v", expected, got)
+	}
+	t.Log("ERMS Support:", got)
+}
+
+// TestVendor writes the detected vendor. Will be 0 if unknown
+func TestVendor(t *testing.T) {
+	t.Log("Vendor ID:", CPU.VendorID)
+}
+
+// TestIntel tests that Intel() is true if the vendor is recognized as Intel
+func TestIntel(t *testing.T) {
+	got := CPU.Intel()
+	expected := CPU.VendorID == Intel
+	if got != expected {
+		t.Fatalf("TestIntel: expected %v, got %v", expected, got)
+	}
+	t.Log("TestIntel:", got)
+}
+
+// TestAMD tests that AMD() is true if the vendor is recognized as AMD
+func TestAMD(t *testing.T) {
+	got := CPU.AMD()
+	expected := CPU.VendorID == AMD
+	if got != expected {
+		t.Fatalf("TestAMD: expected %v, got %v", expected, got)
+	}
+	t.Log("TestAMD:", got)
+}
+
+// TestTransmeta tests that Transmeta() is true if the vendor is recognized as Transmeta
+func TestTransmeta(t *testing.T) {
+	got := CPU.Transmeta()
+	expected := CPU.VendorID == Transmeta
+	if got != expected {
+		t.Fatalf("TestTransmeta: expected %v, got %v", expected, got)
+	}
+	t.Log("TestTransmeta:", got)
+}
+
+// TestNSC tests that NSC() is true if the vendor is recognized as National Semiconductor
+func TestNSC(t *testing.T) {
+	got := CPU.NSC()
+	expected := CPU.VendorID == NSC
+	if got != expected {
+		t.Fatalf("TestNSC: expected %v, got %v", expected, got)
+	}
+	t.Log("TestNSC:", got)
+}
+
+// TestVIA tests that VIA() is true if the vendor is recognized as VIA
+func TestVIA(t *testing.T) {
+	got := CPU.VIA()
+	expected := CPU.VendorID == VIA
+	if got != expected {
+		t.Fatalf("TestVIA: expected %v, got %v", expected, got)
+	}
+	t.Log("TestVIA:", got)
+}
+
+// Test VM function
+func TestVM(t *testing.T) {
+	t.Log("VM:", CPU.VM())
+}
+
+// Test RTCounter function
+func TestRtCounter(t *testing.T) {
+	a := CPU.RTCounter()
+	b := CPU.RTCounter()
+	t.Log("CPU Counter:", a, b, b-a)
+}
+
+// Prints the value of Ia32TscAux()
+func TestIa32TscAux(t *testing.T) {
+	ecx := CPU.Ia32TscAux()
+	t.Logf("Ia32TscAux:0x%x\n", ecx)
+	if ecx != 0 {
+		chip := (ecx & 0xFFF000) >> 12
+		core := ecx & 0xFFF
+		t.Log("Likely chip, core:", chip, core)
+	}
+}
+
+func TestThreadsPerCoreNZ(t *testing.T) {
+	if CPU.ThreadsPerCore == 0 {
+		t.Fatal("threads per core is zero")
+	}
+}
+
+// Prints the value of LogicalCPU()
+func TestLogicalCPU(t *testing.T) {
+	t.Log("Currently executing on cpu:", CPU.LogicalCPU())
+}
+
+func TestMaxFunction(t *testing.T) {
+	expect := maxFunctionID()
+	if CPU.maxFunc != expect {
+		t.Fatal("Max function does not match, expected", expect, "but got", CPU.maxFunc)
+	}
+	expect = maxExtendedFunction()
+	if CPU.maxExFunc != expect {
+		t.Fatal("Max Extended function does not match, 
expected", expect, "but got", CPU.maxFunc) + } +} + +// This example will calculate the chip/core number on Linux +// Linux encodes numa id (<<12) and core id (8bit) into TSC_AUX. +func ExampleCPUInfo_Ia32TscAux(t *testing.T) { + ecx := CPU.Ia32TscAux() + if ecx == 0 { + fmt.Println("Unknown CPU ID") + return + } + chip := (ecx & 0xFFF000) >> 12 + core := ecx & 0xFFF + fmt.Println("Chip, Core:", chip, core) +} + +/* +func TestPhysical(t *testing.T) { + var test16 = "CPUID 00000000: 0000000d-756e6547-6c65746e-49656e69 \nCPUID 00000001: 000206d7-03200800-1fbee3ff-bfebfbff \nCPUID 00000002: 76035a01-00f0b2ff-00000000-00ca0000 \nCPUID 00000003: 00000000-00000000-00000000-00000000 \nCPUID 00000004: 3c004121-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004122-01c0003f-0000003f-00000000 \nCPUID 00000004: 3c004143-01c0003f-000001ff-00000000 \nCPUID 00000004: 3c07c163-04c0003f-00003fff-00000006 \nCPUID 00000005: 00000040-00000040-00000003-00021120 \nCPUID 00000006: 00000075-00000002-00000009-00000000 \nCPUID 00000007: 00000000-00000000-00000000-00000000 \nCPUID 00000008: 00000000-00000000-00000000-00000000 \nCPUID 00000009: 00000001-00000000-00000000-00000000 \nCPUID 0000000a: 07300403-00000000-00000000-00000603 \nCPUID 0000000b: 00000000-00000000-00000003-00000003 \nCPUID 0000000b: 00000005-00000010-00000201-00000003 \nCPUID 0000000c: 00000000-00000000-00000000-00000000 \nCPUID 0000000d: 00000007-00000340-00000340-00000000 \nCPUID 0000000d: 00000001-00000000-00000000-00000000 \nCPUID 0000000d: 00000100-00000240-00000000-00000000 \nCPUID 80000000: 80000008-00000000-00000000-00000000 \nCPUID 80000001: 00000000-00000000-00000001-2c100800 \nCPUID 80000002: 20202020-49202020-6c65746e-20295228 \nCPUID 80000003: 6e6f6558-20295228-20555043-322d3545 \nCPUID 80000004: 20303636-20402030-30322e32-007a4847 \nCPUID 80000005: 00000000-00000000-00000000-00000000 \nCPUID 80000006: 00000000-00000000-01006040-00000000 \nCPUID 80000007: 00000000-00000000-00000000-00000100 \nCPUID 80000008: 0000302e-00000000-00000000-00000000" + restore := mockCPU([]byte(test16)) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + Detect() + TestCPUID(t) +} +*/ diff --git a/vendor/github.com/klauspost/cpuid/mockcpu_test.go b/vendor/github.com/klauspost/cpuid/mockcpu_test.go new file mode 100644 index 0000000000..f15173f738 --- /dev/null +++ b/vendor/github.com/klauspost/cpuid/mockcpu_test.go @@ -0,0 +1,209 @@ +package cpuid + +import ( + "archive/zip" + "fmt" + 
"io/ioutil" + "sort" + "strings" + "testing" +) + +type fakecpuid map[uint32][][]uint32 + +type idfuncs struct { + cpuid func(op uint32) (eax, ebx, ecx, edx uint32) + cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32) + xgetbv func(index uint32) (eax, edx uint32) +} + +func (f fakecpuid) String() string { + var out = make([]string, 0, len(f)) + for key, val := range f { + for _, v := range val { + out = append(out, fmt.Sprintf("CPUID %08x: [%08x, %08x, %08x, %08x]", key, v[0], v[1], v[2], v[3])) + } + } + sorter := sort.StringSlice(out) + sort.Sort(&sorter) + return strings.Join(sorter, "\n") +} + +func mockCPU(def []byte) func() { + lines := strings.Split(string(def), "\n") + anyfound := false + fakeID := make(fakecpuid) + for _, line := range lines { + line = strings.Trim(line, "\r\t ") + if !strings.HasPrefix(line, "CPUID") { + continue + } + // Only collect for first cpu + if strings.HasPrefix(line, "CPUID 00000000") { + if anyfound { + break + } + } + if !strings.Contains(line, "-") { + //continue + } + items := strings.Split(line, ":") + if len(items) < 2 { + if len(line) == 51 || len(line) == 50 { + items = []string{line[0:14], line[15:]} + } else { + items = strings.Split(line, "\t") + if len(items) != 2 { + //fmt.Println("not found:", line, "len:", len(line)) + continue + } + } + } + items = items[0:2] + vals := strings.Trim(items[1], "\r\n ") + + var idV uint32 + n, err := fmt.Sscanf(items[0], "CPUID %x", &idV) + if err != nil || n != 1 { + continue + } + existing, ok := fakeID[idV] + if !ok { + existing = make([][]uint32, 0) + } + + values := make([]uint32, 4) + n, err = fmt.Sscanf(vals, "%x-%x-%x-%x", &values[0], &values[1], &values[2], &values[3]) + if n != 4 || err != nil { + n, err = fmt.Sscanf(vals, "%x %x %x %x", &values[0], &values[1], &values[2], &values[3]) + if n != 4 || err != nil { + //fmt.Println("scanned", vals, "got", n, "Err:", err) + continue + } + } + + existing = append(existing, values) + fakeID[idV] = existing + anyfound = true + } + + restorer := func(f idfuncs) func() { + return func() { + cpuid = f.cpuid + cpuidex = f.cpuidex + xgetbv = f.xgetbv + } + }(idfuncs{cpuid: cpuid, cpuidex: cpuidex, xgetbv: xgetbv}) + + cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { + if op == 0x80000000 || op == 0 { + var ok bool + _, ok = fakeID[op] + if !ok { + return 0, 0, 0, 0 + } + } + first, ok := fakeID[op] + if !ok { + if op > maxFunctionID() { + panic(fmt.Sprintf("Base not found: %v, request:%#v\n", fakeID, op)) + } else { + // we have some entries missing + return 0, 0, 0, 0 + } + } + theid := first[0] + return theid[0], theid[1], theid[2], theid[3] + } + cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { + if op == 0x80000000 { + var ok bool + _, ok = fakeID[op] + if !ok { + return 0, 0, 0, 0 + } + } + first, ok := fakeID[op] + if !ok { + if op > maxExtendedFunction() { + panic(fmt.Sprintf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2)) + } else { + // we have some entries missing + return 0, 0, 0, 0 + } + } + if int(op2) >= len(first) { + //fmt.Printf("Extended not found Info: %v, request:%#v, %#v\n", fakeID, op, op2) + return 0, 0, 0, 0 + } + theid := first[op2] + return theid[0], theid[1], theid[2], theid[3] + } + xgetbv = func(index uint32) (eax, edx uint32) { + first, ok := fakeID[1] + if !ok { + panic(fmt.Sprintf("XGETBV not supported %v", fakeID)) + } + second := first[0] + // ECX bit 26 must be set + if (second[2] & 1 << 26) == 0 { + panic(fmt.Sprintf("XGETBV not supported %v", fakeID)) + } + // We don't have any 
data to return, unfortunately + return 0, 0 + } + return restorer +} + +func TestMocks(t *testing.T) { + zr, err := zip.OpenReader("testdata/cpuid_data.zip") + if err != nil { + t.Skip("No testdata:", err) + } + defer zr.Close() + for _, f := range zr.File { + rc, err := f.Open() + if err != nil { + t.Fatal(err) + } + content, err := ioutil.ReadAll(rc) + if err != nil { + t.Fatal(err) + } + rc.Close() + t.Log("Opening", f.FileInfo().Name()) + restore := mockCPU(content) + Detect() + t.Log("Name:", CPU.BrandName) + n := maxFunctionID() + t.Logf("Max Function:0x%x\n", n) + n = maxExtendedFunction() + t.Logf("Max Extended Function:0x%x\n", n) + t.Log("PhysicalCores:", CPU.PhysicalCores) + t.Log("ThreadsPerCore:", CPU.ThreadsPerCore) + t.Log("LogicalCores:", CPU.LogicalCores) + t.Log("Family", CPU.Family, "Model:", CPU.Model) + t.Log("Features:", CPU.Features) + t.Log("Cacheline bytes:", CPU.CacheLine) + t.Log("L1 Instruction Cache:", CPU.Cache.L1I, "bytes") + t.Log("L1 Data Cache:", CPU.Cache.L1D, "bytes") + t.Log("L2 Cache:", CPU.Cache.L2, "bytes") + t.Log("L3 Cache:", CPU.Cache.L3, "bytes") + if CPU.LogicalCores > 0 && CPU.PhysicalCores > 0 { + if CPU.LogicalCores != CPU.PhysicalCores*CPU.ThreadsPerCore { + t.Fatalf("Core count mismatch, LogicalCores (%d) != PhysicalCores (%d) * CPU.ThreadsPerCore (%d)", + CPU.LogicalCores, CPU.PhysicalCores, CPU.ThreadsPerCore) + } + } + + if CPU.ThreadsPerCore > 1 && !CPU.HTT() { + t.Fatalf("Hyperthreading not detected") + } + if CPU.ThreadsPerCore == 1 && CPU.HTT() { + t.Fatalf("Hyperthreading detected, but only 1 Thread per core") + } + restore() + } + Detect() + +} diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml new file mode 100644 index 0000000000..c62e25f5a5 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.3 + - 1.4 + - 1.5 + - tip + +script: + - go test -v . + - go test -v -race . diff --git a/vendor/github.com/klauspost/crc32/crc32_test.go b/vendor/github.com/klauspost/crc32/crc32_test.go new file mode 100644 index 0000000000..d9dd3099f9 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/crc32_test.go @@ -0,0 +1,170 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package crc32 + +import ( + "hash" + "hash/crc32" + "io" + "testing" +) + +type test struct { + ieee, castagnoli uint32 + in string +} + +var golden = []test{ + {0x0, 0x0, ""}, + {0xe8b7be43, 0xc1d04330, "a"}, + {0x9e83486d, 0xe2a22936, "ab"}, + {0x352441c2, 0x364b3fb7, "abc"}, + {0xed82cd11, 0x92c80a31, "abcd"}, + {0x8587d865, 0xc450d697, "abcde"}, + {0x4b8e39ef, 0x53bceff1, "abcdef"}, + {0x312a6aa6, 0xe627f441, "abcdefg"}, + {0xaeef2a50, 0xa9421b7, "abcdefgh"}, + {0x8da988af, 0x2ddc99fc, "abcdefghi"}, + {0x3981703a, 0xe6599437, "abcdefghij"}, + {0x6b9cdfe7, 0xb2cc01fe, "Discard medicine more than two years old."}, + {0xc90ef73f, 0xe28207f, "He who has a shady past knows that nice guys finish last."}, + {0xb902341f, 0xbe93f964, "I wouldn't marry him with a ten foot pole."}, + {0x42080e8, 0x9e3be0c3, "Free! Free!/A trip/to Mars/for 900/empty jars/Burma Shave"}, + {0x154c6d11, 0xf505ef04, "The days of the digital watch are numbered. -Tom Stoppard"}, + {0x4c418325, 0x85d3dc82, "Nepal premier won't resign."}, + {0x33955150, 0xc5142380, "For every action there is an equal and opposite government program."}, + {0x26216a4b, 0x75eb77dd, "His money is twice tainted: 'taint yours and 'taint mine."}, + {0x1abbe45e, 0x91ebe9f7, "There is no reason for any individual to have a computer in their home. -Ken Olsen, 1977"}, + {0xc89a94f7, 0xf0b1168e, "It's a tiny change to the code and not completely disgusting. - Bob Manchek"}, + {0xab3abe14, 0x572b74e2, "size: a.out: bad magic"}, + {0xbab102b6, 0x8a58a6d5, "The major problem is with sendmail. -Mark Horton"}, + {0x999149d7, 0x9c426c50, "Give me a rock, paper and scissors and I will move the world. CCFestoon"}, + {0x6d52a33c, 0x735400a4, "If the enemy is within range, then so are you."}, + {0x90631e8d, 0xbec49c95, "It's well we cannot hear the screams/That we create in others' dreams."}, + {0x78309130, 0xa95a2079, "You remind me of a TV show, but that's all right: I watch it anyway."}, + {0x7d0a377f, 0xde2e65c5, "C is as portable as Stonehedge!!"}, + {0x8c79fd79, 0x297a88ed, "Even if I could be Shakespeare, I think I should still choose to be Faraday. - A. Huxley"}, + {0xa20b7167, 0x66ed1d8b, "The fugacity of a constituent in a mixture of gases at a given temperature is proportional to its mole fraction. Lewis-Randall Rule"}, + {0x8e0bb443, 0xdcded527, "How can you write a big system without C++? -Paul Glick"}, +} + +func TestGolden(t *testing.T) { + castagnoliTab := MakeTable(Castagnoli) + + for _, g := range golden { + ieee := NewIEEE() + io.WriteString(ieee, g.in) + s := ieee.Sum32() + if s != g.ieee { + t.Errorf("IEEE(%s) = 0x%x want 0x%x", g.in, s, g.ieee) + } + + castagnoli := New(castagnoliTab) + io.WriteString(castagnoli, g.in) + s = castagnoli.Sum32() + if s != g.castagnoli { + t.Errorf("Castagnoli(%s) = 0x%x want 0x%x", g.in, s, g.castagnoli) + } + + if len(g.in) > 0 { + // The SSE4.2 implementation of this has code to deal + // with misaligned data so we ensure that we test that + // too. 
+ castagnoli = New(castagnoliTab) + io.WriteString(castagnoli, g.in[:1]) + io.WriteString(castagnoli, g.in[1:]) + s = castagnoli.Sum32() + if s != g.castagnoli { + t.Errorf("Castagnoli[misaligned](%s) = 0x%x want 0x%x", g.in, s, g.castagnoli) + } + } + } +} + +func BenchmarkCrc40B(b *testing.B) { + benchmark(b, NewIEEE(), 40) +} + +func BenchmarkStdCrc40B(b *testing.B) { + benchmark(b, crc32.NewIEEE(), 40) +} + +func BenchmarkCrc1KB(b *testing.B) { + benchmark(b, NewIEEE(), 1024) +} + +func BenchmarkStdCrc1KB(b *testing.B) { + benchmark(b, crc32.NewIEEE(), 1024) +} + +func BenchmarkCrc8KB(b *testing.B) { + benchmark(b, NewIEEE(), 8*1024) +} + +func BenchmarkStdCrc8KB(b *testing.B) { + benchmark(b, crc32.NewIEEE(), 8*1024) +} + +func BenchmarkCrc32KB(b *testing.B) { + benchmark(b, NewIEEE(), 32*1024) +} + +func BenchmarkStdCrc32KB(b *testing.B) { + benchmark(b, crc32.NewIEEE(), 32*1024) +} + +func BenchmarkCastagnoli40B(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 40) +} + +func BenchmarkStdCastagnoli40B(b *testing.B) { + benchmark(b, crc32.New(crc32.MakeTable(Castagnoli)), 40) +} + +func BenchmarkCastagnoli1KB(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 1024) +} + +func BenchmarkStdCastagnoli1KB(b *testing.B) { + benchmark(b, crc32.New(crc32.MakeTable(Castagnoli)), 1024) +} + +func BenchmarkCastagnoli8KB(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 8*1024) +} + +func BenchmarkStdCastagnoli8KB(b *testing.B) { + benchmark(b, crc32.New(crc32.MakeTable(Castagnoli)), 8*1024) +} + +func BenchmarkCastagnoli32KB(b *testing.B) { + benchmark(b, New(MakeTable(Castagnoli)), 32*1024) +} + +func BenchmarkStdCastagnoli32KB(b *testing.B) { + benchmark(b, crc32.New(crc32.MakeTable(Castagnoli)), 32*1024) +} + +func benchmark(b *testing.B, h hash.Hash32, n int64) { + b.SetBytes(n) + data := make([]byte, n) + for i := range data { + data[i] = byte(i) + } + in := make([]byte, 0, h.Size()) + + // Warm up + h.Reset() + h.Write(data) + h.Sum(in) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + h.Reset() + h.Write(data) + h.Sum(in) + } +} diff --git a/vendor/github.com/klauspost/crc32/example_test.go b/vendor/github.com/klauspost/crc32/example_test.go new file mode 100644 index 0000000000..621bf83830 --- /dev/null +++ b/vendor/github.com/klauspost/crc32/example_test.go @@ -0,0 +1,28 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package crc32_test + +import ( + "fmt" + "hash/crc32" +) + +func ExampleMakeTable() { + // In this package, the CRC polynomial is represented in reversed notation, + // or LSB-first representation. + // + // LSB-first representation is a hexadecimal number with n bits, in which the + // most significant bit represents the coefficient of x⁰ and the least significant + // bit represents the coefficient of xⁿ⁻¹ (the coefficient for xⁿ is implicit). + // + // For example, CRC32-Q, as defined by the following polynomial, + // x³²+ x³¹+ x²⁴+ x²²+ x¹⁶+ x¹⁴+ x⁸+ x⁷+ x⁵+ x³+ x¹+ x⁰ + // has the reversed notation 0b11010101100000101000001010000001, so the value + // that should be passed to MakeTable is 0xD5828281. 
+ crc32q := crc32.MakeTable(0xD5828281) + fmt.Printf("%08x\n", crc32.Checksum([]byte("Hello world"), crc32q)) + // Output: + // 2964d064 +} diff --git a/vendor/github.com/mattbaird/elastigo/.drone.yml b/vendor/github.com/mattbaird/elastigo/.drone.yml new file mode 100644 index 0000000000..c7d12df6bc --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/.drone.yml @@ -0,0 +1,10 @@ +script: + - go get -t ./... + - go get github.com/bmizerany/assert + - cd lib + - go build + - go test -v -loaddata + - go test -v + +services: + - dockerfile/elasticsearch diff --git a/vendor/github.com/mattbaird/elastigo/.gitignore b/vendor/github.com/mattbaird/elastigo/.gitignore new file mode 100644 index 0000000000..a6c4926741 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/.gitignore @@ -0,0 +1,31 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.vagrant + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +elastigo + +# IDE + +*.iml +.idea/ diff --git a/vendor/github.com/mattbaird/elastigo/.gitmodules b/vendor/github.com/mattbaird/elastigo/.gitmodules new file mode 100644 index 0000000000..312fdd1ac7 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/.gitmodules @@ -0,0 +1,12 @@ +[submodule "java"] + path = java + url = https://github.com/cookbooks/java.git +[submodule "cookbooks/java"] + path = cookbooks/java + url = https://github.com/cookbooks/java.git +[submodule "cookbooks/apt"] + path = cookbooks/apt + url = https://github.com/opscode-cookbooks/apt.git +[submodule "cookbooks/elasticsearch"] + path = cookbooks/elasticsearch + url = https://github.com/mattbaird/elasticsearch-chef.git diff --git a/vendor/github.com/mattbaird/elastigo/.travis.yml b/vendor/github.com/mattbaird/elastigo/.travis.yml new file mode 100644 index 0000000000..2cb927cca3 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/.travis.yml @@ -0,0 +1,13 @@ +services: + - elasticsearch + +language: go + +go: + - 1.6 + +script: + - go get -t ./... + - go build ./lib/ + - cd ./lib/ && go test -v -host localhost -loaddata + - go install diff --git a/vendor/github.com/mattbaird/elastigo/HACKING.md b/vendor/github.com/mattbaird/elastigo/HACKING.md new file mode 100644 index 0000000000..ae6c76c446 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/HACKING.md @@ -0,0 +1,21 @@ + +Testing +----------------- + +To run tests, this library loads data into an elasticsearch server and tests against that. + +See api/coretest_test.go. The data set should remain the same as it pulls a known set of github archive data. + +usage: + + $cd core + + $go test -v -host eshost -loaddata # load the data + + $go test -v -host eshost # without load data, which only needs to run once + +Clean out the Elasticsearch index: + + http -v DELETE http://localhost:9200/github + or + curl -XDELETE http://localhost:9200/github diff --git a/vendor/github.com/mattbaird/elastigo/README.md b/vendor/github.com/mattbaird/elastigo/README.md new file mode 100644 index 0000000000..26d0625db7 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/README.md @@ -0,0 +1,249 @@ +elastigo v2.0 +------------- + +[![Build Status](https://drone.io/github.com/mattbaird/elastigo/status.png)](https://drone.io/github.com/mattbaird/elastigo) + +Big thanks to @alicebob for helping to get the drone.io CI working (note: the badge is being cached, known issue). 
+
+A Go (Golang) based Elasticsearch client that implements the core API for indexing and searching.
+GoDoc http://godoc.org/github.com/mattbaird/elastigo
+
+
+NOTE: Based on the great work from Jeremy Shute, Elastigo now supports multiple connections. We attempted to make this backwards compatible; however, in the end it wasn't possible, so we tagged the older single connection code as v1.0 and started work on v2.0.
+
+If you want to use v1.0, you can use a tool like GoDep to make that possible. See http://bit.ly/VLG2et for full details.
+
+The godep tool saves the exact version of the dependencies you’re building your project against, which means that upstream modifications in third-party dependencies won’t break your build.
+
+```bash
+go get github.com/tools/godep
+```
+
+Now, to pull in an existing project with godep:
+```bash
+godep go get github.com/myuser/myproject
+```
+
+When your code compiles in your workspace, like so:
+
+```bash
+cd $HOME/gopath/src/github.com/myuser/myproject
+# hack hack hack
+go build ./...
+```
+
+you can freeze your dependencies as follows:
+
+```bash
+godep save github.com/myuser/myproject
+git add Godeps
+```
+
+The godep tool will examine your code to find and save the transitive closure of your dependencies in the current directory, observing their versions. If you want to restore or update these versions, see the documentation for the tool.
+
+Note, in particular, that if your current directory contains a group of binaries or packages, you may save all of them at once:
+
+```bash
+godep save ./...
+```
+
+To get the Chef based Vagrantfile working, be sure to pull like so:
+
+    # This will pull submodules.
+    git clone --recursive git@github.com:mattbaird/elastigo.git
+
+It's easier to use the ElasticSearch provided Docker image found here: https://github.com/dockerfile/elasticsearch
+
+Non-persistent usage is:
+```bash
+docker run -d -p 9200:9200 -p 9300:9300 dockerfile/elasticsearch
+```
+
+Quick Start with Docker
+=======================
+Make sure docker is installed. If you are running docker on a mac, you must expose ports 9200 and 9300. Shut down docker:
+```bash
+boot2docker stop
+```
+and run
+```bash
+for i in {9200..9300}; do
+ VBoxManage modifyvm "boot2docker-vm" --natpf1 "tcp-port$i,tcp,,$i,,$i";
+ VBoxManage modifyvm "boot2docker-vm" --natpf1 "udp-port$i,udp,,$i,,$i";
+done
+```
+The following will allow you to get the code and run the tests against your docker based non-persistent elasticsearch:
+
+```bash
+docker run -d -p 9200:9200 -p 9300:9300 dockerfile/elasticsearch
+git clone git@github.com:mattbaird/elastigo.git
+cd elastigo
+go get -u ./...
+cd lib
+go test -v -host localhost -loaddata
+cd ..
+go test -v ./...
+``` + +Usage Examples - Currently out of date, being rewritten for v2.0 +---------------------------------------------------------------- + +Adding content to Elasticsearch +------------------------------- + +```go +import "github.com/mattbaird/elastigo/api" +import "github.com/mattbaird/elastigo/core" + +type Tweet struct { + User string `json:"user"` + Message string `json:"message"` +} + +// Set the Elasticsearch Host to Connect to +api.Domain = "localhost" +// api.Port = "9300" + +// add single go struct entity +response, _ := core.Index("twitter", "tweet", "1", nil, Tweet{"kimchy", "Search is cool"}) + +// you have bytes +tw := Tweet{"kimchy", "Search is cool part 2"} +bytesLine, err := json.Marshal(tw) +response, _ := core.Index("twitter", "tweet", "2", nil, bytesLine) + +// Bulk Indexing +t := time.Now() +core.IndexBulk("twitter", "tweet", "3", &t, Tweet{"kimchy", "Search is now cooler"}) + +// Search Using Raw json String +searchJson := `{ + "query" : { + "term" : { "user" : "kimchy" } + } +}` +out, err := core.SearchRequest(true, "twitter", "tweet", searchJson, "") +if len(out.Hits.Hits) == 1 { + fmt.Println(string(out.Hits.Hits[0].Source)) +} +``` + +A Faceted, ranged Search using the `Search DSL` : + +```go +import "github.com/mattbaird/elastigo/api" +import "github.com/mattbaird/elastigo/core" + +// Set the Elasticsearch Host to Connect to +api.Domain = "localhost" +// api.Port = "9300" + +out, err := Search("github").Size("1").Facet( + Facet().Fields("actor").Size("500"), +).Query( + Query().Range( + Range().Field("created_at").From("2012-12-10T15:00:00-08:00").To("2012-12-10T15:10:00-08:00"), + ).Search("add"), +).Result() +``` + +A Ranged Search using the `Search DSL` : + +```go +out, err := Search("github").Type("Issues").Pretty().Query( + Query().Range( + Range().Field("created_at").From("2012-12-10T15:00:00-08:00").To("2012-12-10T15:10:00-08:00"), + ).Search("add"), +).Result() +``` + +A Simple Search using the `Search DSL` : + +```go +out, err := Search("github").Type("Issues").Size("100").Search("add").Result() +``` + +A Direct Search using the api : + +```go +qry := map[string]interface{}{ + "query":map[string]interface{}{ + "term":map[string]string{"user": "kimchy"}, + }, +} +core.SearchRequest(true, "github", "Issues", qry, "", 0) +``` + +A Direct Search using the query string Api : + +```go +core.SearchUri("github", "Issues", "user:kimchy", "", 0) +``` + +A Filtered search `Search DSL` : + +```go +out, err := Search("github").Filter( + Filter().Exists("repository.name"), +).Result() +``` + +Adding content to Elasticsearch in Bulk +---------------------------------------------- + +```go +import "github.com/mattbaird/elastigo/api" +import "github.com/mattbaird/elastigo/core" + +// Set the Elasticsearch Host to Connect to +api.Domain = "localhost" +// api.Port = "9300" + +indexer := core.NewBulkIndexerErrors(10, 60) +done := make(chan bool) +indexer.Run(done) + +go func() { + for errBuf := range indexer.ErrorChannel { + // just blissfully print errors forever + fmt.Println(errBuf.Err) + } +}() +for i := 0; i < 20; i++ { + indexer.Index("twitter", "user", strconv.Itoa(i), "", nil, `{"name":"bob"}`, false) +} +done <- true +// Indexing might take a while. So make sure the program runs +// a little longer when trying this in main. +``` + +status updates +======================== + +* *2014-07-09* Version 2.0 development started. Focused on multi-connection support, using Dial idiom. 
+* *2014-5-21* Note: Drone.io tests are failing, I don't know why because the build and tests are working fine for me on my ubuntu box running the docker elasticsearch image. It's possible there is a timing issue. Any Ideas? +* *2013-9-27* Fleshing out cluster and indices APIs, updated vagrant image to 0.90.3 +* *2013-7-10* Improvements/changes to bulk indexer (includes breaking changes to support TTL), + Search dsl supports And/Or/Not + * *SearchDsl* should still be considered beta at this + point, there will be minor breaking changes as more of the + elasticsearch feature set is implemented. +* *2013-1-26* expansion of search dsl for greater coverage +* *2012-12-30* new bulk indexing and search dsl +* *2012-10-12* early in development, not ready for production yet. + +license +======= + Copyright 2012 Matthew Baird, Aaron Raddon, Jeremy Shute and more! + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/mattbaird/elastigo/Vagrantfile b/vendor/github.com/mattbaird/elastigo/Vagrantfile new file mode 100644 index 0000000000..cbd1f563f4 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/Vagrantfile @@ -0,0 +1,29 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +Vagrant.configure("2") do |config| + config.vm.box = "lucid64" + config.vm.box_url = "http://files.vagrantup.com/lucid64.box" + config.vm.network :forwarded_port, guest: 9300, host: 9300, auto_correct: true + config.vm.provision :shell, :inline => "gem install chef --version 10.26.0 --no-rdoc --no-ri --conservative" + + config.vm.provider :virtualbox do |vb| + vb.gui = false + vb.customize ["modifyvm", :id, "--memory", "1024"] + vb.customize ["modifyvm", :id, "--cpus", "1"] + # This allows symlinks to be created within the /vagrant root directory, + # which is something librarian-puppet needs to be able to do. This might + # be enabled by default depending on what version of VirtualBox is used. + vb.customize ["setextradata", :id, "VBoxInternal2/SharedFoldersEnableSymlinksCreate/v-root", "1"] + end + config.vm.provision :chef_solo do |chef| + chef.cookbooks_path = "cookbooks" + chef.add_recipe("apt") + chef.add_recipe("java") + chef.add_recipe("elasticsearch") + chef.add_recipe("git") + chef.add_recipe("mercurial") + chef.add_recipe("build-essential") + chef.add_recipe("golang") + end +end \ No newline at end of file diff --git a/vendor/github.com/mattbaird/elastigo/client.go b/vendor/github.com/mattbaird/elastigo/client.go new file mode 100644 index 0000000000..b416e0fd84 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/client.go @@ -0,0 +1,85 @@ +// Copyright 2012 Matthew Baird +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package main + +import ( + "encoding/json" + "flag" + "log" + "time" + + elastigo "github.com/mattbaird/elastigo/lib" +) + +var ( + eshost *string = flag.String("host", "localhost", "Elasticsearch Server Host Address") +) + +// for testing +func main() { + flag.Parse() + log.SetFlags(log.Ltime | log.Lshortfile) + + c := elastigo.NewConn() + c.Domain = *eshost + response, _ := c.Index("twitter", "tweet", "1", nil, NewTweet("kimchy", "Search is cool")) + c.Flush() + log.Printf("Index OK: %v", response.Ok) + searchresponse, err := c.Search("twitter", "tweet", nil, "{\"query\" : {\"term\" : { \"user\" : \"kimchy\" }}}") + if err != nil { + log.Println("error during search:" + err.Error()) + log.Fatal(err) + } + // try marshalling to tweet type + var t Tweet + bytes, err := searchresponse.Hits.Hits[0].Source.MarshalJSON() + if err != nil { + log.Fatalf("err calling marshalJson:%v", err) + } + json.Unmarshal(bytes, &t) + log.Printf("Search Found: %s", t) + response, _ = c.Get("twitter", "tweet", "1", nil) + log.Printf("Get: %v", response.Exists) + exists, _ := c.Exists("twitter", "tweet", "1", nil) + log.Printf("Exists: %v", exists) + c.Flush() + countResponse, _ := c.Count("twitter", "tweet", nil, nil) + + log.Printf("Count: %v", countResponse.Count) + response, _ = c.Delete("twitter", "tweet", "1", map[string]interface{}{"version": -1, "routing": ""}) + log.Printf("Delete OK: %v", response.Ok) + response, _ = c.Get("twitter", "tweet", "1", nil) + log.Printf("Get: %v", response.Exists) + + healthResponse, _ := c.Health() + log.Printf("Health: %v", healthResponse.Status) + + c.UpdateSettings("transient", "discovery.zen.minimum_master_nodes", 2) +} + +// used in test suite, chosen to be similar to the documentation +type Tweet struct { + User string `json:"user"` + PostDate time.Time `json:"postDate"` + Message string `json:"message"` +} + +func NewTweet(user string, message string) Tweet { + return Tweet{User: user, PostDate: time.Now(), Message: message} +} + +func (t *Tweet) String() string { + b, _ := json.Marshal(t) + return string(b) +} diff --git a/vendor/github.com/mattbaird/elastigo/doc.go b/vendor/github.com/mattbaird/elastigo/doc.go new file mode 100644 index 0000000000..c7976d1ee8 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/doc.go @@ -0,0 +1,15 @@ +// Copyright 2012 Matthew Baird +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
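+//
+// The root package is a small demo binary; client.go above contains the
+// runnable example that exercises Index, Search, Get, Exists, Count, Delete
+// and Health against a live Elasticsearch node.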
+ +package main diff --git a/vendor/github.com/mattbaird/elastigo/lib/cataliasinfo_test.go b/vendor/github.com/mattbaird/elastigo/lib/cataliasinfo_test.go new file mode 100644 index 0000000000..ffd23f573d --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/cataliasinfo_test.go @@ -0,0 +1,26 @@ +package elastigo + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestCatAliasInfo(t *testing.T) { + Convey("catAlias Create alias line from a broken alias listing", t, func() { + _, err := NewCatAliasInfo("production ") + So(err, ShouldNotBeNil) + }) + Convey("catAlias Create alias line from a complete alias listing", t, func() { + i, err := NewCatAliasInfo("production production-2016") + So(err, ShouldBeNil) + So(i.Name, ShouldEqual, "production") + So(i.Index, ShouldEqual, "production-2016") + }) + Convey("catAlias Create alias line from an over-complete alias listing", t, func() { + i, err := NewCatAliasInfo("production production-2016 - - -") + So(err, ShouldBeNil) + So(i.Name, ShouldEqual, "production") + So(i.Index, ShouldEqual, "production-2016") + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/catindexinfo_test.go b/vendor/github.com/mattbaird/elastigo/lib/catindexinfo_test.go new file mode 100644 index 0000000000..d09f6cd922 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/catindexinfo_test.go @@ -0,0 +1,117 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestCatIndexInfo(t *testing.T) { + Convey("Create index line from a broken index listing", t, func() { + _, err := NewCatIndexInfo("red ") + So(err, ShouldNotBeNil) + }) + Convey("catIndex Create index line from a bad shards index listing", t, func() { + i, err := NewCatIndexInfo("green open logs-2015-06-19 2 1 135389346 20 53048922233 53048922233") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "green") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "logs-2015-06-19") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 135389346) + So(i.Docs.Deleted, ShouldEqual, 20) + So(i.Store.Size, ShouldEqual, 53048922233) + So(i.Store.PriSize, ShouldEqual, 53048922233) + }) + Convey("catIndex Create index line from a bad replicas index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 0 1234 3 11000 13000") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 0) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 3) + So(i.Store.Size, ShouldEqual, 11000) + So(i.Store.PriSize, ShouldEqual, 13000) + }) + Convey("catIndex Create index line from a complete index listing", t, func() { + i, err := NewCatIndexInfo("red closed foo-2000-01-01-bar 2 1 1234 3 11000 13000") + So(err, ShouldBeNil) + So(i.Status, ShouldEqual, "closed") + So(i.Health, ShouldEqual, "red") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 3) + So(i.Store.Size, ShouldEqual, 11000) + So(i.Store.PriSize, ShouldEqual, 13000) + }) + Convey("catIndex Create index line from a bad docs index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 a 3 11000 13000") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, 
"open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 0) + So(i.Docs.Deleted, ShouldEqual, 3) + So(i.Store.Size, ShouldEqual, 11000) + So(i.Store.PriSize, ShouldEqual, 13000) + }) + Convey("catIndex Create index line from a bad deletes index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234 a 11000 13000") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 0) + So(i.Store.Size, ShouldEqual, 11000) + So(i.Store.PriSize, ShouldEqual, 13000) + }) + Convey("catIndex Create index line from a kinda short index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 0) + So(i.Store.Size, ShouldEqual, 0) + So(i.Store.PriSize, ShouldEqual, 0) + }) + Convey("catIndex Create index line from a kinda sorta short index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1 1234 3") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 1234) + So(i.Docs.Deleted, ShouldEqual, 3) + So(i.Store.Size, ShouldEqual, 0) + So(i.Store.PriSize, ShouldEqual, 0) + }) + Convey("catIndex Create index line from a short index listing", t, func() { + i, err := NewCatIndexInfo("red open foo-2000-01-01-bar 2 1") + So(err, ShouldBeNil) + So(i.Health, ShouldEqual, "red") + So(i.Status, ShouldEqual, "open") + So(i.Name, ShouldEqual, "foo-2000-01-01-bar") + So(i.Shards, ShouldEqual, 2) + So(i.Replicas, ShouldEqual, 1) + So(i.Docs.Count, ShouldEqual, 0) + So(i.Docs.Deleted, ShouldEqual, 0) + So(i.Store.Size, ShouldEqual, 0) + So(i.Store.PriSize, ShouldEqual, 0) + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go b/vendor/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go new file mode 100644 index 0000000000..441de24425 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/catnodeinfo_test.go @@ -0,0 +1,58 @@ +package elastigo + +import ( + . 
"github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestCatNode(t *testing.T) { + + c := NewTestConn() + + Convey("Basic cat nodes", t, func() { + + fields := []string{"fm", "fe", "fcm", "fce", "ft", "ftt", "im", "rp", "n"} + catNodes, err := c.GetCatNodeInfo(fields) + + So(err, ShouldBeNil) + So(catNodes, ShouldNotBeNil) + So(len(catNodes), ShouldBeGreaterThan, 0) + + for _, catNode := range catNodes { + So(catNode.FieldMem, ShouldNotBeEmpty) + So(catNode.FiltMem, ShouldNotBeEmpty) + So(catNode.IDCacheMemory, ShouldNotBeEmpty) + So(catNode.RamPerc, ShouldNotBeEmpty) + So(catNode.Name, ShouldNotBeEmpty) + } + }) + + Convey("Cat nodes with default arguments", t, func() { + + fields := []string{} + catNodes, err := c.GetCatNodeInfo(fields) + + So(err, ShouldBeNil) + So(catNodes, ShouldNotBeNil) + So(len(catNodes), ShouldBeGreaterThan, 0) + + for _, catNode := range catNodes { + So(catNode.Host, ShouldNotBeEmpty) + So(catNode.IP, ShouldNotBeEmpty) + So(catNode.NodeRole, ShouldNotBeEmpty) + So(catNode.Name, ShouldNotBeEmpty) + } + }) + + Convey("Invalid field error behavior", t, func() { + + fields := []string{"fm", "bogus"} + catNodes, err := c.GetCatNodeInfo(fields) + + So(err, ShouldNotBeNil) + + for _, catNode := range catNodes { + So(catNode.FieldMem, ShouldNotBeEmpty) + } + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/catshardinfo_test.go b/vendor/github.com/mattbaird/elastigo/lib/catshardinfo_test.go new file mode 100644 index 0000000000..dd6aaaa4f6 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/catshardinfo_test.go @@ -0,0 +1,85 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestCatShardInfo(t *testing.T) { + Convey("Create cat shard from started shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1 Ultra Man") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "STARTED") + So(c.Docs, ShouldEqual, 1234) + So(c.Store, ShouldEqual, 121) + So(c.NodeIP, ShouldEqual, "127.0.0.1") + So(c.NodeName, ShouldEqual, "Ultra Man") + + }) + Convey("Create cat shard from realocating shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p RELOCATING 1234 121 127.0.0.1 Ultra Man -> 10.0.0.1 Super Man") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "RELOCATING") + So(c.Docs, ShouldEqual, 1234) + So(c.Store, ShouldEqual, 121) + So(c.NodeIP, ShouldEqual, "127.0.0.1") + So(c.NodeName, ShouldEqual, "Ultra Man") + }) + Convey("Create cat shard from unallocated shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p UNASSIGNED") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "UNASSIGNED") + So(c.Docs, ShouldEqual, 0) + So(c.Store, ShouldEqual, 0) + So(c.NodeIP, ShouldEqual, "") + So(c.NodeName, ShouldEqual, "") + }) + Convey("Create cat shard from invalid shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar 0 p") + So(err, ShouldEqual, ErrInvalidShardLine) + So(c, ShouldBeNil) + }) + Convey("Create cat shard from garbled shard", t, func() { + c, err := NewCatShardInfo("foo-2000-01-01-bar a p STARTED abc 121 127.0.0.1 Ultra Man") + So(err, ShouldBeNil) + So(c, ShouldNotBeNil) + So(c.Shard, 
ShouldEqual, -1) + So(c.IndexName, ShouldEqual, "foo-2000-01-01-bar") + So(c.Primary, ShouldEqual, "p") + So(c.State, ShouldEqual, "STARTED") + So(c.Docs, ShouldEqual, 0) + So(c.Store, ShouldEqual, 121) + So(c.NodeIP, ShouldEqual, "127.0.0.1") + So(c.NodeName, ShouldEqual, "Ultra Man") + }) + Convey("Print cat shard from started shard", t, func() { + c, _ := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1 Ultra Man") + s := c.String() + So(s, ShouldContainSubstring, "foo-2000-01-01-bar:") + So(s, ShouldContainSubstring, ":Ultra Man") + c = nil + s = c.String() + So(s, ShouldEqual, ":::::::") + }) + Convey("Print cat shard from short shard", t, func() { + c, _ := NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234") + s := c.String() + So(s, ShouldContainSubstring, "foo-2000-01-01-bar:0:p:STARTED:1234") + c, _ = NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121") + s = c.String() + So(s, ShouldContainSubstring, "oo-2000-01-01-bar:0:p:STARTED:1234:121") + c, _ = NewCatShardInfo("foo-2000-01-01-bar 0 p STARTED 1234 121 127.0.0.1") + s = c.String() + So(s, ShouldContainSubstring, "oo-2000-01-01-bar:0:p:STARTED:1234:121:127.0.0.1") + }) + +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go b/vendor/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go new file mode 100644 index 0000000000..5ee6b5d7b6 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/clusternodesinfo_test.go @@ -0,0 +1,37 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "fmt" + "github.com/bmizerany/assert" + "testing" +) + +func TestGetAll(t *testing.T) { + InitTests(true) + c := NewTestConn() + nodesInfo, err := c.AllNodesInfo() + assert.T(t, err == nil, fmt.Sprintf("should not have gotten error, received: %v", err)) + assert.T(t, nodesInfo.ClusterName != "", fmt.Sprintf("clustername should have been not empty. 
received: %q", nodesInfo.ClusterName)) + for _, node := range nodesInfo.Nodes { + assert.T(t, node.Settings != nil, fmt.Sprintf("Settings should not have been null")) + assert.T(t, node.OS != nil, fmt.Sprintf("OS should not have been null")) + assert.T(t, node.Process != nil, fmt.Sprintf("Process should not have been null")) + assert.T(t, node.JVM != nil, fmt.Sprintf("JVM should not have been null")) + assert.T(t, node.ThreadPool != nil, fmt.Sprintf("ThreadPool should not have been null")) + assert.T(t, node.Network != nil, fmt.Sprintf("Network should not have been null")) + assert.T(t, node.Transport != nil, fmt.Sprintf("Transport should not have been null")) + assert.T(t, node.Http != nil, fmt.Sprintf("Http should not have been null")) + assert.T(t, node.Plugins != nil, fmt.Sprintf("Plugins should not have been null")) + } +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/connection_test.go b/vendor/github.com/mattbaird/elastigo/lib/connection_test.go new file mode 100644 index 0000000000..5719b017ac --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/connection_test.go @@ -0,0 +1,62 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "fmt" + "testing" + + "github.com/bmizerany/assert" +) + +func TestSetFromUrl(t *testing.T) { + c := NewConn() + + err := c.SetFromUrl("http://localhost") + exp := "localhost" + assert.T(t, c.Domain == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Domain)) + + c = NewConn() + + err = c.SetFromUrl("http://localhost:9200") + exp = "9200" + assert.T(t, c.Port == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Port)) + + c = NewConn() + + err = c.SetFromUrl("http://localhost:9200") + exp = "localhost" + assert.T(t, c.Domain == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Domain)) + + c = NewConn() + + err = c.SetFromUrl("http://someuser@localhost:9200") + exp = "someuser" + assert.T(t, c.Username == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Username)) + + c = NewConn() + + err = c.SetFromUrl("http://someuser:password@localhost:9200") + exp = "password" + assert.T(t, c.Password == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Password)) + + c = NewConn() + + err = c.SetFromUrl("http://someuser:password@localhost:9200") + exp = "someuser" + assert.T(t, c.Username == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, c.Username)) + + c = NewConn() + + err = c.SetFromUrl("") + exp = "Url is empty" + assert.T(t, err != nil && err.Error() == exp, fmt.Sprintf("Expected %s, got: %s", exp, err.Error())) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/corebulk_test.go b/vendor/github.com/mattbaird/elastigo/lib/corebulk_test.go new file mode 100644 index 0000000000..e352ae338e --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/corebulk_test.go @@ -0,0 +1,399 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "bytes" + "crypto/rand" + "encoding/json" + "flag" + "fmt" + "log" + "net/url" + "strconv" + "sync" + "testing" + "time" + + "github.com/araddon/gou" + "github.com/bmizerany/assert" +) + +// go test -bench=".*" +// go test -bench="Bulk" + +type sharedBuffer struct { + mu sync.Mutex + Buffer []*bytes.Buffer +} + +func NewSharedBuffer() *sharedBuffer { + return &sharedBuffer{ + Buffer: make([]*bytes.Buffer, 0), + } +} + +func (b *sharedBuffer) Append(buf *bytes.Buffer) { + b.mu.Lock() + defer b.mu.Unlock() + b.Buffer = append(b.Buffer, buf) +} + +func (b *sharedBuffer) Length() int { + b.mu.Lock() + defer b.mu.Unlock() + return len(b.Buffer) +} + +func init() { + flag.Parse() + if testing.Verbose() { + gou.SetupLogging("debug") + } +} + +// take two ints, compare, need to be within 5% +func closeInt(a, b int) bool { + c := float64(a) / float64(b) + if c >= .95 && c <= 1.05 { + return true + } + return false +} + +func TestBulkIndexerBasic(t *testing.T) { + testIndex := "users" + var ( + buffers = NewSharedBuffer() + totalBytesSent int + messageSets int + ) + + InitTests(true) + c := NewTestConn() + + c.DeleteIndex(testIndex) + + indexer := c.NewBulkIndexer(3) + indexer.Sender = func(buf *bytes.Buffer) error { + messageSets += 1 + totalBytesSent += buf.Len() + buffers.Append(buf) + //log.Printf("buffer:%s", string(buf.Bytes())) + return indexer.Send(buf) + } + indexer.Start() + + date := time.Unix(1257894000, 0) + data := map[string]interface{}{ + "name": "smurfs", + "age": 22, + "date": "yesterday", + } + + err := indexer.Index(testIndex, "user", "1", "", "", &date, data) + waitFor(func() bool { + return buffers.Length() > 0 + }, 5) + + // part of request is url, so lets factor that in + //totalBytesSent = totalBytesSent - len(*eshost) + assert.T(t, buffers.Length() == 1, fmt.Sprintf("Should have sent one operation but was %d", buffers.Length())) + assert.T(t, indexer.NumErrors() == 0 && err == nil, fmt.Sprintf("Should not have any errors. 
NumErrors: %v, err: %v", indexer.NumErrors(), err)) + expectedBytes := 129 + assert.T(t, totalBytesSent == expectedBytes, fmt.Sprintf("Should have sent %v bytes but was %v", expectedBytes, totalBytesSent)) + + err = indexer.Index(testIndex, "user", "2", "", "", nil, data) + waitFor(func() bool { + return buffers.Length() > 1 + }, 5) + + // this will test to ensure that Flush actually catches a doc + indexer.Flush() + totalBytesSent = totalBytesSent - len(*eshost) + assert.T(t, err == nil, fmt.Sprintf("Should have nil error =%v", err)) + assert.T(t, buffers.Length() == 2, fmt.Sprintf("Should have another buffer ct=%d", buffers.Length())) + + assert.T(t, indexer.NumErrors() == 0, fmt.Sprintf("Should not have any errors %d", indexer.NumErrors())) + expectedBytes = 220 + assert.T(t, closeInt(totalBytesSent, expectedBytes), fmt.Sprintf("Should have sent %v bytes but was %v", expectedBytes, totalBytesSent)) + + indexer.Stop() +} + +func TestRefreshParam(t *testing.T) { + requrlChan := make(chan *url.URL, 1) + InitTests(true) + c := NewTestConn() + c.RequestTracer = func(method, urlStr, body string) { + requrl, _ := url.Parse(urlStr) + requrlChan <- requrl + } + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + + // Now tests small batches + indexer := c.NewBulkIndexer(1) + indexer.Refresh = true + + indexer.Start() + <-time.After(time.Millisecond * 20) + + indexer.Index("users", "user", "2", "", "", &date, data) + + <-time.After(time.Millisecond * 200) + // indexer.Flush() + indexer.Stop() + requrl := <-requrlChan + assert.T(t, requrl.Query().Get("refresh") == "true", "Should have set refresh query param to true") +} + +func TestWithoutRefreshParam(t *testing.T) { + requrlChan := make(chan *url.URL, 1) + InitTests(true) + c := NewTestConn() + c.RequestTracer = func(method, urlStr, body string) { + requrl, _ := url.Parse(urlStr) + requrlChan <- requrl + } + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + + // Now tests small batches + indexer := c.NewBulkIndexer(1) + + indexer.Start() + <-time.After(time.Millisecond * 20) + + indexer.Index("users", "user", "2", "", "", &date, data) + + <-time.After(time.Millisecond * 200) + // indexer.Flush() + indexer.Stop() + requrl := <-requrlChan + assert.T(t, requrl.Query().Get("refresh") == "false", "Should have set refresh query param to false") +} + +// currently broken in drone.io +func XXXTestBulkUpdate(t *testing.T) { + var ( + buffers = NewSharedBuffer() + totalBytesSent int + messageSets int + ) + + InitTests(true) + c := NewTestConn() + c.Port = "9200" + indexer := c.NewBulkIndexer(3) + indexer.Sender = func(buf *bytes.Buffer) error { + messageSets += 1 + totalBytesSent += buf.Len() + buffers.Append(buf) + return indexer.Send(buf) + } + indexer.Start() + + date := time.Unix(1257894000, 0) + user := map[string]interface{}{ + "name": "smurfs", "age": 22, "date": date, "count": 1, + } + + // Lets make sure the data is in the index ... + _, err := c.Index("users", "user", "5", nil, user) + + // script and params + data := map[string]interface{}{ + "script": "ctx._source.count += 2", + } + err = indexer.Update("users", "user", "5", "", "", &date, data) + // So here's the deal. Flushing does seem to work, you just have to give the + // channel a moment to recieve the message ... 
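+	// Rather than sleeping, the waitFor helper below polls until the custom
+	// Sender has appended at least one buffer.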
+ // <- time.After(time.Millisecond * 20) + // indexer.Flush() + + waitFor(func() bool { + return buffers.Length() > 0 + }, 5) + + indexer.Stop() + + assert.T(t, indexer.NumErrors() == 0 && err == nil, fmt.Sprintf("Should not have any errors, bulkErrorCt:%v, err:%v", indexer.NumErrors(), err)) + + response, err := c.Get("users", "user", "5", nil) + assert.T(t, err == nil, fmt.Sprintf("Should not have any errors %v", err)) + m := make(map[string]interface{}) + json.Unmarshal([]byte(*response.Source), &m) + newCount := m["count"] + assert.T(t, newCount.(float64) == 3, + fmt.Sprintf("Should have update count: %#v ... %#v", m["count"], response)) +} + +func TestBulkSmallBatch(t *testing.T) { + var ( + messageSets int + ) + + InitTests(true) + c := NewTestConn() + + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + + // Now tests small batches + indexer := c.NewBulkIndexer(1) + indexer.BufferDelayMax = 100 * time.Millisecond + indexer.BulkMaxDocs = 2 + messageSets = 0 + indexer.Sender = func(buf *bytes.Buffer) error { + messageSets += 1 + return indexer.Send(buf) + } + indexer.Start() + <-time.After(time.Millisecond * 20) + + indexer.Index("users", "user", "2", "", "", &date, data) + indexer.Index("users", "user", "3", "", "", &date, data) + indexer.Index("users", "user", "4", "", "", &date, data) + <-time.After(time.Millisecond * 200) + // indexer.Flush() + indexer.Stop() + assert.T(t, messageSets == 2, fmt.Sprintf("Should have sent 2 message sets %d", messageSets)) + +} + +func TestBulkDelete(t *testing.T) { + InitTests(true) + var lock sync.Mutex + c := NewTestConn() + indexer := c.NewBulkIndexer(1) + sentBytes := []byte{} + + indexer.Sender = func(buf *bytes.Buffer) error { + lock.Lock() + sentBytes = append(sentBytes, buf.Bytes()...) + lock.Unlock() + return nil + } + + indexer.Start() + + indexer.Delete("fake", "fake_type", "1") + + indexer.Flush() + indexer.Stop() + + lock.Lock() + sent := string(sentBytes) + lock.Unlock() + + expected := `{"delete":{"_index":"fake","_type":"fake_type","_id":"1"}} +` + asExpected := sent == expected + assert.T(t, asExpected, fmt.Sprintf("Should have sent '%s' but actually sent '%s'", expected, sent)) +} + +func XXXTestBulkErrors(t *testing.T) { + // lets set a bad port, and hope we get a conn refused error? 
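+	// Port 27845 below is assumed to be closed, so each bulk send should fail
+	// with a connection error that surfaces on the indexer's ErrorChannel and
+	// is counted in the range loop further down.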
+ c := NewTestConn() + c.Port = "27845" + defer func() { + c.Port = "9200" + }() + indexer := c.NewBulkIndexerErrors(10, 1) + indexer.Start() + errorCt := 0 + go func() { + for i := 0; i < 20; i++ { + date := time.Unix(1257894000, 0) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": date} + indexer.Index("users", "user", strconv.Itoa(i), "", "", &date, data) + } + }() + var errBuf *ErrorBuffer + for errBuf = range indexer.ErrorChannel { + errorCt++ + break + } + if errBuf.Buf.Len() > 0 { + gou.Debug(errBuf.Err) + } + assert.T(t, errorCt > 0, fmt.Sprintf("ErrorCt should be > 0 %d", errorCt)) + indexer.Stop() +} + +/* +BenchmarkSend 18:33:00 bulk_test.go:131: Sent 1 messages in 0 sets totaling 0 bytes +18:33:00 bulk_test.go:131: Sent 100 messages in 1 sets totaling 145889 bytes +18:33:01 bulk_test.go:131: Sent 10000 messages in 100 sets totaling 14608888 bytes +18:33:05 bulk_test.go:131: Sent 20000 messages in 99 sets totaling 14462790 bytes + 20000 234526 ns/op + +*/ +func BenchmarkSend(b *testing.B) { + InitTests(true) + c := NewTestConn() + b.StartTimer() + totalBytes := 0 + sets := 0 + indexer := c.NewBulkIndexer(1) + indexer.Sender = func(buf *bytes.Buffer) error { + totalBytes += buf.Len() + sets += 1 + //log.Println("got bulk") + return indexer.Send(buf) + } + for i := 0; i < b.N; i++ { + about := make([]byte, 1000) + rand.Read(about) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} + indexer.Index("users", "user", strconv.Itoa(i), "", "", nil, data) + } + log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) + if indexer.NumErrors() != 0 { + b.Fail() + } +} + +/* +TODO: this should be faster than above + +BenchmarkSendBytes 18:33:05 bulk_test.go:169: Sent 1 messages in 0 sets totaling 0 bytes +18:33:05 bulk_test.go:169: Sent 100 messages in 2 sets totaling 292299 bytes +18:33:09 bulk_test.go:169: Sent 10000 messages in 99 sets totaling 14473800 bytes + 10000 373529 ns/op + +*/ +func BenchmarkSendBytes(b *testing.B) { + InitTests(true) + c := NewTestConn() + about := make([]byte, 1000) + rand.Read(about) + data := map[string]interface{}{"name": "smurfs", "age": 22, "date": time.Unix(1257894000, 0), "about": about} + body, _ := json.Marshal(data) + b.StartTimer() + totalBytes := 0 + sets := 0 + indexer := c.NewBulkIndexer(1) + indexer.Sender = func(buf *bytes.Buffer) error { + totalBytes += buf.Len() + sets += 1 + return indexer.Send(buf) + } + for i := 0; i < b.N; i++ { + indexer.Index("users", "user", strconv.Itoa(i), "", "", nil, body) + } + log.Printf("Sent %d messages in %d sets totaling %d bytes \n", b.N, sets, totalBytes) + if indexer.NumErrors() != 0 { + b.Fail() + } +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/coreexample_test.go b/vendor/github.com/mattbaird/elastigo/lib/coreexample_test.go new file mode 100644 index 0000000000..fed31dfee5 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/coreexample_test.go @@ -0,0 +1,52 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo_test + +import ( + "bytes" + "fmt" + "strconv" + + elastigo "github.com/mattbaird/elastigo/lib" +) + +// The simplest usage of background bulk indexing +func ExampleBulkIndexer_simple() { + c := elastigo.NewConn() + + indexer := c.NewBulkIndexerErrors(10, 60) + indexer.Start() + indexer.Index("twitter", "user", "1", "", "", nil, `{"name":"bob"}`) + indexer.Stop() +} + +// The inspecting the response +func ExampleBulkIndexer_responses() { + c := elastigo.NewConn() + + indexer := c.NewBulkIndexer(10) + // Create a custom Sender Func, to allow inspection of response/error + indexer.Sender = func(buf *bytes.Buffer) error { + // @buf is the buffer of docs about to be written + respJson, err := c.DoCommand("POST", "/_bulk", nil, buf) + if err != nil { + // handle it better than this + fmt.Println(string(respJson)) + } + return err + } + indexer.Start() + for i := 0; i < 20; i++ { + indexer.Index("twitter", "user", strconv.Itoa(i), "", "", nil, `{"name":"bob"}`) + } + indexer.Stop() +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/corepercolate_test.go b/vendor/github.com/mattbaird/elastigo/lib/corepercolate_test.go new file mode 100644 index 0000000000..55a0713a1e --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/corepercolate_test.go @@ -0,0 +1,64 @@ +package elastigo + +import ( + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +const ( + percIndexName = "test-perc-index" +) + +func TestPercolate(t *testing.T) { + Convey("With a registered percolator", t, func() { + c := NewTestConn() + _, createErr := c.CreateIndex(percIndexName) + So(createErr, ShouldBeNil) + defer c.DeleteIndex(percIndexName) + + options := `{ + "percType": { + "properties": { + "message": { + "type": "string" + } + } + } + }` + + err := c.PutMappingFromJSON(percIndexName, "percType", []byte(options)) + So(err, ShouldBeNil) + + data := `{ + "query": { + "match": { + "message": "bonsai tree" + } + } + }` + + _, err = c.RegisterPercolate(percIndexName, "PERCID", data) + So(err, ShouldBeNil) + + Convey("That matches the document", func() { + // Should return the percolator id (registered query) + doc := `{"doc": { "message": "A new bonsai tree in the office" }}` + + result, err := c.Percolate(percIndexName, "percType", "", nil, doc) + So(err, ShouldBeNil) + So(len(result.Matches), ShouldEqual, 1) + match := result.Matches[0] + So(match.Id, ShouldEqual, "PERCID") + So(match.Index, ShouldEqual, percIndexName) + }) + + Convey("That does not match the document", func() { + // Should NOT return the percolator id (registered query) + doc := `{"doc": { "message": "Barren wasteland with no matches" }}` + + result, err := c.Percolate(percIndexName, "percType", "", nil, doc) + So(err, ShouldBeNil) + So(len(result.Matches), ShouldEqual, 0) + }) + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/coresearch_test.go b/vendor/github.com/mattbaird/elastigo/lib/coresearch_test.go new file mode 100644 index 0000000000..a16d6fec2d --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/coresearch_test.go @@ -0,0 +1,83 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "encoding/json" + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +type SuggestTest struct { + Completion string `json:"completion"` +} + +type hash map[string]interface{} + +func TestCoreSearch(t *testing.T) { + + c := NewTestConn() + c.CreateIndex("github") + waitFor(func() bool { return false }, 5) + + defer func() { + c.DeleteIndex("github") + }() + + Convey("Convert a search result to JSON", t, func() { + + qry := map[string]interface{}{ + "query": map[string]interface{}{ + "wildcard": map[string]string{"actor": "a*"}, + }, + } + var args map[string]interface{} + out, err := c.Search("github", "", args, qry) + So(err, ShouldBeNil) + + _, err = json.Marshal(out.Hits.Hits) + So(err, ShouldBeNil) + }) + + Convey("Update a document and verify that it is reflected", t, func() { + mappingOpts := MappingOptions{Properties: hash{ + "completion": hash{ + "type": "completion", + }, + }} + err := c.PutMapping("github", "SuggestTest", SuggestTest{}, mappingOpts) + So(err, ShouldBeNil) + + _, err = c.UpdateWithPartialDoc("github", "SuggestTest", "1", nil, SuggestTest{"foobar"}, true) + So(err, ShouldBeNil) + + query := hash{"completion_completion": hash{ + "text": "foo", + "completion": hash{ + "size": 10, + "field": "completion", + }, + }} + + _, err = c.Refresh("github") + So(err, ShouldBeNil) + + res, err := c.Suggest("github", nil, query) + So(err, ShouldBeNil) + + opts, err := res.Result("completion_completion") + So(err, ShouldBeNil) + + So(len(opts[0].Options), ShouldBeGreaterThan, 0) + So(opts[0].Options[0].Text, ShouldEqual, "foobar") + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/coretest_test.go b/vendor/github.com/mattbaird/elastigo/lib/coretest_test.go new file mode 100644 index 0000000000..db85908447 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/coretest_test.go @@ -0,0 +1,198 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
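+//
+// This file provides the shared test bootstrap: InitTests, NewTestConn and
+// waitFor, plus LoadTestData, which bulk-loads roughly 6700 GitHub archive
+// events into the test index when the -loaddata flag is passed.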
+ +package elastigo + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/md5" + "encoding/json" + "flag" + "fmt" + "io" + "log" + "net/http" + "time" +) + +/* + +usage: + + test -v -host eshost -loaddata + +*/ + +const ( + testIndex = "github" +) + +var ( + bulkStarted bool + hasStartedTesting bool + hasLoadedData bool + sleepAfterLoad bool + loadData *bool = flag.Bool("loaddata", false, "This loads a bunch of test data into elasticsearch for testing") + sleep *int = flag.Int("sleep", 0, "Post bulk loading sleep test to make drone.io work") +) + +func InitTests(startIndexer bool) *Conn { + c := NewConn() + + if !hasStartedTesting { + flag.Parse() + hasStartedTesting = true + log.SetFlags(log.Ltime | log.Lshortfile) + c.Domain = *eshost + } + if startIndexer && !bulkStarted { + bulkStarted = true + b := c.NewBulkIndexer(100) + b.Start() + if *loadData && !hasLoadedData { + log.Println("loading test data ") + hasLoadedData = true + LoadTestData() + } + b.Stop() + } + c.Flush("_all") + c.Refresh("_all") + if !sleepAfterLoad { + time.Sleep(time.Duration(*sleep) * time.Second) + } + sleepAfterLoad = true + return c +} + +func NewTestConn() *Conn { + c := NewConn() + c.Domain = *eshost + return c +} + +// Wait for condition (defined by func) to be true, a utility to create a ticker +// checking every 100 ms to see if something (the supplied check func) is done +// +// waitFor(func() bool { +// return ctr.Ct == 0 +// }, 10) +// +// @timeout (in seconds) is the last arg +func waitFor(check func() bool, timeoutSecs int) { + timer := time.NewTicker(100 * time.Millisecond) + tryct := 0 + for range timer.C { + if check() { + timer.Stop() + break + } + if tryct >= timeoutSecs*10 { + timer.Stop() + break + } + tryct++ + } +} + +type GithubEvent struct { + Url string + Created time.Time `json:"created_at"` + Type string +} + +// This loads test data from github archives (~6700 docs) +func LoadTestData() { + c := NewConn() + c.Domain = *eshost + + c.DeleteIndex(testIndex) + + docCt := 0 + errCt := 0 + indexer := c.NewBulkIndexer(1) + indexer.Sender = func(buf *bytes.Buffer) error { + // log.Printf("Sent %d bytes total %d docs sent", buf.Len(), docCt) + req, err := c.NewRequest("POST", "/_bulk", "") + if err != nil { + errCt += 1 + log.Fatalf("ERROR: %v", err) + return err + } + req.SetBody(buf) + // res, err := http.DefaultClient.Do(*(api.Request(req))) + var response map[string]interface{} + httpStatusCode, _, err := req.Do(&response) + if err != nil { + errCt += 1 + log.Fatalf("ERROR: %v", err) + return err + } + if httpStatusCode != 200 { + log.Fatalf("Not 200! %d %q\n", httpStatusCode, buf.String()) + } + return nil + } + indexer.Start() + resp, err := http.Get("http://data.githubarchive.org/2012-12-10-15.json.gz") + if err != nil || resp == nil { + panic("Could not download data") + } + defer resp.Body.Close() + if err != nil { + log.Println(err) + return + } + gzReader, err := gzip.NewReader(resp.Body) + defer gzReader.Close() + if err != nil { + panic(err) + } + r := bufio.NewReader(gzReader) + var ge GithubEvent + docsm := make(map[string]bool) + h := md5.New() + for { + line, err := r.ReadBytes('\n') + if err != nil { + if err == io.EOF { + indexer.Flush() + break + } + log.Fatalf("could not read line: %v", err) + } + if err := json.Unmarshal(line, &ge); err == nil { + // create an "ID" + h.Write(line) + id := fmt.Sprintf("%x", h.Sum(nil)) + if _, ok := docsm[id]; ok { + log.Println("HM, already exists? 
", ge.Url) + } + docsm[id] = true + indexer.Index(testIndex, ge.Type, id, "", "", &ge.Created, line) + docCt++ + } else { + log.Println("ERROR? ", string(line)) + } + } + if errCt != 0 { + log.Println("FATAL, could not load ", errCt) + } + // lets wait a bit to ensure that elasticsearch finishes? + indexer.Stop() + if len(docsm) != docCt { + panic(fmt.Sprintf("Docs didn't match? %d:%d", len(docsm), docCt)) + } + c.Flush(testIndex) + c.Refresh(testIndex) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go b/vendor/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go new file mode 100644 index 0000000000..30e469c3fc --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/indicesdeletemapping_test.go @@ -0,0 +1,54 @@ +package elastigo + +import ( + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" +) + +func TestDeleteMapping(t *testing.T) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method != "DELETE" { + t.Errorf("Expected HTTP Verb, DELETE") + } + + if r.URL.Path == "/this/exists" { + w.Write([]byte(`{"acknowledged": true}`)) + } else if r.URL.Path == "/this/not_exists" { + w.WriteHeader(http.StatusNotFound) + w.Write([]byte(`{"error": "TypeMissingException[[_all] type[[not_exists]] missing: No index has the type.]","status": 404}`)) + } else { + t.Errorf("Unexpected request path, %s", r.URL.Path) + } + })) + defer ts.Close() + + serverURL, _ := url.Parse(ts.URL) + + c := NewTestConn() + + c.Domain = strings.Split(serverURL.Host, ":")[0] + c.Port = strings.Split(serverURL.Host, ":")[1] + + _, err := c.DeleteMapping("this", "exists") + if err != nil { + t.Errorf("Expected no error and got, %s", err) + } + + _, err = c.DeleteMapping("this", "not_exists") + if err == nil { + t.Errorf("Expected error and got none deleting /this/not_exists") + } + + _, err = c.DeleteMapping("", "two") + if err == nil { + t.Errorf("Expected error for no index and got none") + } + + _, err = c.DeleteMapping("one", "") + if err == nil { + t.Errorf("Expected error for no mapping and got none") + } +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go b/vendor/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go new file mode 100644 index 0000000000..cc0072c004 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/indicesputmapping_test.go @@ -0,0 +1,356 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastigo + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "sort" + "strings" + "testing" +) + +var ( + mux *http.ServeMux + server *httptest.Server +) + +func setup(t *testing.T) *Conn { + mux = http.NewServeMux() + server = httptest.NewServer(mux) + c := NewTestConn() + + serverURL, err := url.Parse(server.URL) + if err != nil { + t.Fatalf("Error: %v", err) + } + + c.Domain = strings.Split(serverURL.Host, ":")[0] + c.Port = strings.Split(serverURL.Host, ":")[1] + + return c +} + +func teardown() { + server.Close() +} + +type TestStruct struct { + Id string `json:"id" elastic:"index:not_analyzed"` + DontIndex string `json:"dontIndex" elastic:"index:no"` + Number int `json:"number" elastic:"type:integer,index:analyzed"` + Omitted string `json:"-"` + NoJson string `elastic:"type:string"` + unexported string + JsonOmitEmpty string `json:"jsonOmitEmpty,omitempty" elastic:"type:string"` + Embedded + Inner InnerStruct `json:"inner"` + InnerP *InnerStruct `json:"pointer_to_inner"` + InnerS []InnerStruct `json:"slice_of_inner"` + MultiAnalyze string `json:"multi_analyze"` + NestedObject NestedStruct `json:"nestedObject" elastic:"type:nested"` +} + +type Embedded struct { + EmbeddedField string `json:"embeddedField" elastic:"type:string"` +} + +type InnerStruct struct { + InnerField string `json:"innerField" elastic:"type:date"` +} + +type NestedStruct struct { + InnerField string `json:"innerField" elastic:"type:date"` +} + +// Sorting string +// RuneSlice implements sort.Interface (http://golang.org/pkg/sort/#Interface) +type RuneSlice []rune + +func (p RuneSlice) Len() int { return len(p) } +func (p RuneSlice) Less(i, j int) bool { return p[i] < p[j] } +func (p RuneSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sorted func returns string with sorted characters +func sorted(s string) string { + runes := []rune(s) + sort.Sort(RuneSlice(runes)) + return string(runes) +} + +func TestPutMapping(t *testing.T) { + c := setup(t) + defer teardown() + + options := MappingOptions{ + Timestamp: TimestampOptions{Enabled: true}, + Id: IdOptions{Index: "analyzed", Path: "id"}, + Parent: &ParentOptions{Type: "testParent"}, + TTL: &TTLOptions{Enabled: true, Default: "1w"}, + Properties: map[string]interface{}{ + // special properties that can't be expressed as tags + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + "fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + }, + DynamicTemplates: []map[string]interface{}{ + map[string]interface{}{ + "strings": map[string]interface{}{ + "match_mapping_type": "string", + "mapping": map[string]interface{}{ + "type": "string", + "index": "not_analyzed", + }, + }, + }, + }, + } + + expValue := MappingForType("myType", MappingOptions{ + Timestamp: TimestampOptions{Enabled: true}, + Id: IdOptions{Index: "analyzed", Path: "id"}, + Parent: &ParentOptions{Type: "testParent"}, + TTL: &TTLOptions{Enabled: true, Default: "1w"}, + Properties: map[string]interface{}{ + "NoJson": map[string]string{"type": "string"}, + "dontIndex": map[string]string{"index": "no"}, + "embeddedField": map[string]string{"type": "string"}, + "id": map[string]string{"index": "not_analyzed"}, + "jsonOmitEmpty": map[string]string{"type": "string"}, + "number": map[string]string{"index": "analyzed", "type": "integer"}, + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + 
"fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + "inner": map[string]map[string]map[string]string{ + "properties": { + "innerField": {"type": "date"}, + }, + }, + "pointer_to_inner": map[string]map[string]map[string]string{ + "properties": { + "innerField": {"type": "date"}, + }, + }, + "slice_of_inner": map[string]map[string]map[string]string{ + "properties": { + "innerField": {"type": "date"}, + }, + }, + "nestedObject": map[string]interface{}{ + "type": "nested", + "properties": map[string]map[string]string{ + "innerField": {"type": "date"}, + }, + }, + }, + DynamicTemplates: []map[string]interface{}{ + map[string]interface{}{ + "strings": map[string]interface{}{ + "match_mapping_type": "string", + "mapping": map[string]interface{}{ + "type": "string", + "index": "not_analyzed", + }, + }, + }, + }, + }) + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + var value map[string]interface{} + bd, err := ioutil.ReadAll(r.Body) + json.NewDecoder(strings.NewReader(string(bd))).Decode(&value) + expValJson, err := json.MarshalIndent(expValue, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + valJson, err := json.MarshalIndent(value, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + + if sorted(string(expValJson)) != sorted(string(valJson)) { + t.Errorf("Expected %s but got %s", string(expValJson), string(valJson)) + } + }) + + err := c.PutMapping("myIndex", "myType", TestStruct{}, options) + if err != nil { + t.Errorf("Error: %v", err) + } +} + +func TestPutMappingFromJSON(t *testing.T) { + c := setup(t) + defer teardown() + /* + options := MappingOptions{ + Timestamp: TimestampOptions{Enabled: true}, + Id: IdOptions{Index: "analyzed", Path: "id"}, + Parent: &ParentOptions{Type: "testParent"}, + Properties: map[string]interface{}{ + // special properties that can't be expressed as tags + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + "fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + }, + DynamicTemplates: []map[string]interface{}{ + "strings": map[string]interface{}{ + "match_mapping_type": "string", + "mapping": { + "type": "string", + "index": "not_analyzed", + }, + }, + }, + } + */ + + options := `{ + "myType": { + "_id": { + "index": "analyzed", + "path": "id" + }, + "_timestamp": { + "enabled": true + }, + "_parent": { + "type": "testParent" + }, + "properties": { + "analyzed_string": { + "type": "string", + "index": "analyzed" + }, + "multi_analyze": { + "type": "multi_field", + "fields": { + "ma_analyzed": { + "type": "string", + "index": "analyzed" + }, + "ma_notanalyzed": { + "type": "string", + "index": "not_analyzed" + } + } + } + }, + "dynamic_templates": [ + { + "strings": { + "match_mapping_type": "string", + "mapping": { + "type": "string", + "index": "not_analyzed" + } + } + } + ] + } + }` + + expValue := map[string]interface{}{ + "myType": map[string]interface{}{ + "_timestamp": map[string]interface{}{ + "enabled": true, + }, + "_id": map[string]interface{}{ + "index": "analyzed", + "path": "id", + }, + "_parent": map[string]interface{}{ + "type": "testParent", + }, + "properties": map[string]interface{}{ + "analyzed_string": map[string]string{ + "type": "string", + "index": "analyzed", + }, + "multi_analyze": map[string]interface{}{ + "type": "multi_field", + 
"fields": map[string]map[string]string{ + "ma_analyzed": {"type": "string", "index": "analyzed"}, + "ma_notanalyzed": {"type": "string", "index": "not_analyzed"}, + }, + }, + }, + "dynamic_templates": []map[string]interface{}{ + map[string]interface{}{ + "strings": map[string]interface{}{ + "match_mapping_type": "string", + "mapping": map[string]interface{}{ + "type": "string", + "index": "not_analyzed", + }, + }, + }, + }, + }, + } + + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + var value map[string]interface{} + bd, err := ioutil.ReadAll(r.Body) + err = json.Unmarshal(bd, &value) + if err != nil { + t.Errorf("Got error: %v", err) + } + expValJson, err := json.MarshalIndent(expValue, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + + valJson, err := json.MarshalIndent(value, "", " ") + if err != nil { + t.Errorf("Got error: %v", err) + } + + if sorted(string(expValJson)) != sorted(string(valJson)) { + t.Errorf("Expected %s but got %s", string(expValJson), string(valJson)) + } + }) + + err := c.PutMappingFromJSON("myIndex", "myType", []byte(options)) + if err != nil { + t.Errorf("Error: %v", err) + } +} + +type StructWithEmptyElasticTag struct { + Field string `json:"field" elastic:""` +} + +func TestPutMapping_empty_elastic_tag_is_accepted(t *testing.T) { + properties := map[string]interface{}{} + getProperties(reflect.TypeOf(StructWithEmptyElasticTag{}), properties) + if len(properties) != 0 { + t.Errorf("Expected empty properites but got: %v", properties) + } +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/request_test.go b/vendor/github.com/mattbaird/elastigo/lib/request_test.go new file mode 100644 index 0000000000..ee42fa2afa --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/request_test.go @@ -0,0 +1,200 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastigo + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "net/http" + "strings" + "testing" + + "github.com/bmizerany/assert" +) + +func TestQueryString(t *testing.T) { + // Test nil argument + s, err := Escape(nil) + assert.T(t, s == "" && err == nil, fmt.Sprintf("Nil should not fail and yield empty string")) + + // Test single string argument + s, err = Escape(map[string]interface{}{"foo": "bar"}) + exp := "foo=bar" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single int argument + s, err = Escape(map[string]interface{}{"foo": int(1)}) + exp = "foo=1" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single int64 argument + s, err = Escape(map[string]interface{}{"foo": int64(1)}) + exp = "foo=1" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single int32 argument + s, err = Escape(map[string]interface{}{"foo": int32(1)}) + exp = "foo=1" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single float64 argument + s, err = Escape(map[string]interface{}{"foo": float64(3.141592)}) + exp = "foo=3.141592" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single float32 argument + s, err = Escape(map[string]interface{}{"foo": float32(3.141592)}) + exp = "foo=3.141592" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test single []string argument + s, err = Escape(map[string]interface{}{"foo": []string{"bar", "baz"}}) + exp = "foo=bar%2Cbaz" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test combination of all arguments + s, err = Escape(map[string]interface{}{ + "foo": "bar", + "bar": 1, + "baz": 3.141592, + "test": []string{"a", "b"}, + }) + // url.Values also orders arguments alphabetically. 
+ exp = "bar=1&baz=3.141592&foo=bar&test=a%2Cb" + assert.T(t, s == exp && err == nil, fmt.Sprintf("Expected %s, got: %s", exp, s)) + + // Test invalid datatype + s, err = Escape(map[string]interface{}{"foo": []int{}}) + assert.T(t, err != nil, fmt.Sprintf("Expected err to not be nil")) +} + +func TestDoResponseError(t *testing.T) { + v := make(map[string]string) + conn := NewConn() + req, _ := conn.NewRequest("GET", "http://mock.com", "") + req.Client = http.DefaultClient + defer func() { + req.Client.Transport = http.DefaultTransport + }() + + // application/json + req.Client.Transport = newMockTransport(500, "application/json", `{"error":"internal_server_error"}`) + res, bodyBytes, err := req.DoResponse(&v) + assert.NotEqual(t, nil, res) + assert.Equal(t, nil, err) + assert.Equal(t, 500, res.StatusCode) + assert.Equal(t, "application/json", res.Header.Get("Content-Type")) + assert.Equal(t, "internal_server_error", v["error"]) + assert.Equal(t, []byte(`{"error":"internal_server_error"}`), bodyBytes) + + // text/html + v = make(map[string]string) + req.Client.Transport = newMockTransport(500, "text/html", "HTTP 500 Internal Server Error") + res, bodyBytes, err = req.DoResponse(&v) + assert.T(t, res == nil, fmt.Sprintf("Expected nil, got: %v", res)) + assert.NotEqual(t, nil, err) + assert.Equal(t, 0, len(v)) + assert.Equal(t, []byte("HTTP 500 Internal Server Error"), bodyBytes) + assert.Equal(t, fmt.Errorf(http.StatusText(500)), err) + + // mime error + v = make(map[string]string) + req.Client.Transport = newMockTransport(500, "", "HTTP 500 Internal Server Error") + res, bodyBytes, err = req.DoResponse(&v) + assert.T(t, res == nil, fmt.Sprintf("Expected nil, got: %v", res)) + assert.NotEqual(t, nil, err) + assert.Equal(t, 0, len(v)) + assert.Equal(t, []byte("HTTP 500 Internal Server Error"), bodyBytes) + assert.NotEqual(t, fmt.Errorf(http.StatusText(500)), err) +} + +type mockTransport struct { + statusCode int + contentType string + body string +} + +func newMockTransport(statusCode int, contentType, body string) http.RoundTripper { + return &mockTransport{ + statusCode: statusCode, + contentType: contentType, + body: body, + } +} + +func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { + response := &http.Response{ + Header: make(http.Header), + Request: req, + StatusCode: t.statusCode, + } + response.Header.Set("Content-Type", t.contentType) + response.Body = ioutil.NopCloser(strings.NewReader(t.body)) + return response, nil +} + +func TestSetBodyGzip(t *testing.T) { + s := "foo" + + // test []byte + expB := []byte(s) + actB, err := gzipHelper(t, expB) + assert.T(t, err == nil, fmt.Sprintf("Expected err to be nil")) + assert.T(t, bytes.Compare(actB, expB) == 0, fmt.Sprintf("Expected: %s, got: %s", expB, actB)) + + // test string + expS := s + actS, err := gzipHelper(t, expS) + assert.T(t, err == nil, fmt.Sprintf("Expected err to be nil")) + assert.T(t, string(actS) == expS, fmt.Sprintf("Expected: %s, got: %s", expS, actS)) + + // test io.Reader + expR := strings.NewReader(s) + actR, err := gzipHelper(t, expR) + assert.T(t, err == nil, fmt.Sprintf("Expected err to be nil")) + assert.T(t, bytes.Compare([]byte(s), actR) == 0, fmt.Sprintf("Expected: %s, got: %s", s, actR)) + + // test other + expO := testStruct{Name: "Travis"} + actO, err := gzipHelper(t, expO) + assert.T(t, err == nil, fmt.Sprintf("Expected err to not be nil")) + assert.T(t, bytes.Compare([]byte(`{"name":"Travis"}`), actO) == 0, fmt.Sprintf("Expected: %s, got: %s", s, actO)) +} + +type testStruct 
struct { + Name string `json:"name"` +} + +func gzipHelper(t *testing.T, data interface{}) ([]byte, error) { + r, err := http.NewRequest("GET", "http://google.com", nil) + if err != nil { + return nil, err + } + + // test string + req := &Request{ + Request: r, + } + + err = req.SetBodyGzip(data) + if err != nil { + return nil, err + } + + gr, err := gzip.NewReader(req.Body) + if err != nil { + return nil, err + } + + return ioutil.ReadAll(gr) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/searchaggregate_test.go b/vendor/github.com/mattbaird/elastigo/lib/searchaggregate_test.go new file mode 100644 index 0000000000..331809ce65 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/searchaggregate_test.go @@ -0,0 +1,177 @@ +package elastigo + +import ( + "encoding/json" + "reflect" + "testing" +) + +// Test all aggregate types and nested aggregations +func TestAggregateDsl(t *testing.T) { + + min := Aggregate("min_price").Min("price") + max := Aggregate("max_price").Max("price") + sum := Aggregate("sum_price").Sum("price") + avg := Aggregate("avg_price").Avg("price") + stats := Aggregate("stats_price").Stats("price") + extendedStats := Aggregate("extended_stats_price").ExtendedStats("price") + valueCount := Aggregate("value_count_price").ValueCount("price") + percentiles := Aggregate("percentiles_price").Percentiles("price") + cardinality := Aggregate("cardinality_price").Cardinality("price", true, 50) + global := Aggregate("global").Global() + missing := Aggregate("missing_price").Missing("price") + terms := Aggregate("terms_price").Terms("price") + termsSize := Aggregate("terms_price_size").TermsWithSize("price", 0) + significantTerms := Aggregate("significant_terms_price").SignificantTerms("price") + histogram := Aggregate("histogram_price").Histogram("price", 50) + + dateAgg := Aggregate("articles_over_time").DateHistogram("date", "month") + dateAgg.Aggregates( + min, + max, + sum, + avg, + stats, + extendedStats, + valueCount, + percentiles, + cardinality, + global, + missing, + terms, + termsSize, + significantTerms, + histogram, + ) + + qry := Search("github").Aggregates(dateAgg) + + marshaled, err := json.MarshalIndent(qry.AggregatesVal, "", " ") + if err != nil { + t.Errorf("Failed to marshal AggregatesVal: %s", err.Error()) + return + } + + assertJsonMatch( + t, + marshaled, + []byte(` + { + "articles_over_time": { + "date_histogram" : { + "field" : "date", + "interval" : "month" + }, + "aggregations": { + "min_price":{ + "min": { "field": "price" } + }, + "max_price":{ + "max": { "field": "price" } + }, + "sum_price":{ + "sum": { "field": "price" } + }, + "avg_price": { + "avg": { "field": "price" } + }, + "stats_price":{ + "stats": { "field": "price" } + }, + "extended_stats_price":{ + "extended_stats": { "field": "price" } + }, + "value_count_price":{ + "value_count": { "field": "price" } + }, + "percentiles_price":{ + "percentiles": { "field": "price" } + }, + "cardinality_price":{ + "cardinality": { "field": "price", "precision_threshold": 50 } + }, + "global":{ + "global": {} + }, + "missing_price":{ + "missing": { "field": "price" } + }, + "terms_price":{ + "terms": { "field": "price" } + }, + "terms_price_size":{ + "terms": { "field": "price", "size": 0 } + }, + "significant_terms_price":{ + "significant_terms": { "field": "price" } + }, + "histogram_price":{ + "histogram": { "field": "price", "interval": 50 } + } + } + } + } + `), + ) + +} + +func TestAggregateFilter(t *testing.T) { + + avg := Aggregate("avg_price").Avg("price") + + dateAgg := 
Aggregate("in_stock_products").Filter( + Filter().Range("stock", nil, 0, nil, nil, ""), + ) + + dateAgg.Aggregates( + avg, + ) + + qry := Search("github").Aggregates(dateAgg) + + marshaled, err := json.MarshalIndent(qry.AggregatesVal, "", " ") + if err != nil { + t.Errorf("Failed to marshal AggregatesVal: %s", err.Error()) + return + } + + assertJsonMatch( + t, + marshaled, + []byte(` + { + "in_stock_products" : { + "filter" : { + "range" : { "stock" : { "gt" : 0 } } + }, + "aggregations" : { + "avg_price" : { "avg" : { "field" : "price" } } + } + } + } + `), + ) +} + +func assertJsonMatch(t *testing.T, match, expected []byte) { + var m interface{} + var e interface{} + + err := json.Unmarshal(expected, &e) + if err != nil { + t.Errorf("Failed to unmarshal expectation: %s", err.Error()) + return + } + err = json.Unmarshal(match, &m) + if err != nil { + t.Errorf("Failed to unmarshal match: %s", err.Error()) + return + } + + if !reflect.DeepEqual(m, e) { + t.Errorf("Expected %s but got %s", string(expected), string(match)) + return + } + +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/searchfacet_test.go b/vendor/github.com/mattbaird/elastigo/lib/searchfacet_test.go new file mode 100644 index 0000000000..11e2664a8b --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/searchfacet_test.go @@ -0,0 +1,42 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "github.com/araddon/gou" + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestFacetRegex(t *testing.T) { + + c := NewTestConn() + PopulateTestDB(t, c) + defer TearDownTestDB(c) + + Convey("Facted regex query", t, func() { + + // This is a possible solution for auto-complete + out, err := Search("oilers").Size("0").Facet( + Facet().Regex("name", "[jk].*").Size("8"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + // Debug(string(out.Facets)) + fh := gou.NewJsonHelper([]byte(out.Facets)) + facets := fh.Helpers("/name/terms") + So(err, ShouldBeNil) + So(facets, ShouldNotBeNil) + So(len(facets), ShouldEqual, 4) + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/searchfilter_test.go b/vendor/github.com/mattbaird/elastigo/lib/searchfilter_test.go new file mode 100644 index 0000000000..a9f6316e0f --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/searchfilter_test.go @@ -0,0 +1,287 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestFilterDsl(t *testing.T) { + Convey("And filter", t, func() { + filter := Filter().And(Filter().Term("test", "asdf")). + And(Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) + actual, err := GetJson(filter) + + actualFilters := actual["and"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(2, ShouldEqual, len(actualFilters)) + So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) + So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) + }) + + Convey("Or filter", t, func() { + filter := Filter().Or(Filter().Term("test", "asdf"), Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) + actual, err := GetJson(filter) + + actualFilters := actual["or"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(2, ShouldEqual, len(actualFilters)) + So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) + So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) + }) + + Convey("Not filter", t, func() { + filter := Filter().Not(Filter().Term("test", "asdf")). + Not(Filter().Range("rangefield", 1, 2, 3, 4, "+08:00")) + actual, err := GetJson(filter) + + actualFilters := actual["not"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(2, ShouldEqual, len(actualFilters)) + So(true, ShouldEqual, HasKey(actualFilters[0].(map[string]interface{}), "term")) + So(true, ShouldEqual, HasKey(actualFilters[1].(map[string]interface{}), "range")) + }) + + Convey("Terms filter", t, func() { + filter := Filter().Terms("Sample", TEMAnd, "asdf", 123, true) + actual, err := GetJson(filter) + + actualTerms := actual["terms"].(map[string]interface{}) + actualValues := actualTerms["Sample"].([]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(3, ShouldEqual, len(actualValues)) + So(actualValues[0], ShouldEqual, "asdf") + So(actualValues[1], ShouldEqual, float64(123)) + So(actualValues[2], ShouldEqual, true) + So("and", ShouldEqual, actualTerms["execution"]) + }) + + Convey("Term filter", t, func() { + filter := Filter().Term("Sample", "asdf").Term("field2", 341.4) + actual, err := GetJson(filter) + + actualTerm := actual["term"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("asdf", ShouldEqual, actualTerm["Sample"]) + So(float64(341.4), ShouldEqual, actualTerm["field2"]) + }) + + Convey("Range filter", t, func() { + filter := Filter().Range("rangefield", 1, 2, 3, 4, "+08:00") + actual, err := GetJson(filter) + //A bit lazy, probably should assert keys exist + actualRange := actual["range"].(map[string]interface{})["rangefield"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(float64(1), ShouldEqual, actualRange["gte"]) + So(float64(2), ShouldEqual, actualRange["gt"]) + So(float64(3), ShouldEqual, actualRange["lte"]) + So(float64(4), ShouldEqual, actualRange["lt"]) + So("+08:00", ShouldEqual, actualRange["time_zone"]) + }) + + Convey("Exists filter", t, func() { + filter := Filter().Exists("field1") + actual, err := GetJson(filter) + + actualValue := actual["exists"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("field1", ShouldEqual, actualValue["field"]) + }) + + Convey("Missing filter", t, func() { + filter := Filter().Missing("field1") + actual, err := GetJson(filter) + + actualValue := 
actual["missing"].(map[string]interface{}) + + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("field1", ShouldEqual, actualValue["field"]) + }) + + Convey("Limit filter", t, func() { + filter := Filter().Limit(100) + actual, err := GetJson(filter) + + actualValue := actual["limit"].(map[string]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(float64(100), ShouldEqual, actualValue["value"]) + }) + + Convey("Type filter", t, func() { + filter := Filter().Type("my_type") + actual, err := GetJson(filter) + + actualValue := actual["type"].(map[string]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So("my_type", ShouldEqual, actualValue["value"]) + }) + + Convey("Ids filter", t, func() { + filter := Filter().Ids("test", "asdf", "fdsa") + actual, err := GetJson(filter) + + actualValue := actual["ids"].(map[string]interface{}) + actualValues := actualValue["values"].([]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(nil, ShouldEqual, actualValue["type"]) + So(3, ShouldEqual, len(actualValues)) + So("test", ShouldEqual, actualValues[0]) + So("asdf", ShouldEqual, actualValues[1]) + So("fdsa", ShouldEqual, actualValues[2]) + }) + + Convey("IdsByTypes filter", t, func() { + filter := Filter().IdsByTypes([]string{"my_type"}, "test", "asdf", "fdsa") + actual, err := GetJson(filter) + + actualValue := actual["ids"].(map[string]interface{}) + actualTypes := actualValue["type"].([]interface{}) + actualValues := actualValue["values"].([]interface{}) + So(err, ShouldBeNil) + So(1, ShouldEqual, len(actual)) + So(1, ShouldEqual, len(actualTypes)) + So("my_type", ShouldEqual, actualTypes[0]) + So(3, ShouldEqual, len(actualValues)) + So("test", ShouldEqual, actualValues[0]) + So("asdf", ShouldEqual, actualValues[1]) + So("fdsa", ShouldEqual, actualValues[2]) + }) + + Convey("GeoDistance filter", t, func() { + filter := Filter().GeoDistance("100km", NewGeoField("pin.location", 32.3, 23.4)) + actual, err := GetJson(filter) + + actualValue := actual["geo_distance"].(map[string]interface{}) + actualLocation := actualValue["pin.location"].(map[string]interface{}) + So(err, ShouldBeNil) + So("100km", ShouldEqual, actualValue["distance"]) + So(float64(32.3), ShouldEqual, actualLocation["lat"]) + So(float64(23.4), ShouldEqual, actualLocation["lon"]) + }) + + Convey("GeoDistanceRange filter", t, func() { + filter := Filter().GeoDistanceRange("100km", "200km", NewGeoField("pin.location", 32.3, 23.4)) + actual, err := GetJson(filter) + + actualValue := actual["geo_distance_range"].(map[string]interface{}) + actualLocation := actualValue["pin.location"].(map[string]interface{}) + So(err, ShouldBeNil) + So("100km", ShouldEqual, actualValue["from"]) + So("200km", ShouldEqual, actualValue["to"]) + So(float64(32.3), ShouldEqual, actualLocation["lat"]) + So(float64(23.4), ShouldEqual, actualLocation["lon"]) + }) +} + +func TestFilters(t *testing.T) { + + c := NewTestConn() + PopulateTestDB(t, c) + defer TearDownTestDB(c) + + Convey("Exists filter", t, func() { + qry := Search("oilers").Filter( + Filter().Exists("goals"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 12) + }) + + Convey("Missing filter", t, func() { + qry := Search("oilers").Filter( + Filter().Missing("goals"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + 
So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("Terms filter", t, func() { + qry := Search("oilers").Filter( + Filter().Terms("pos", TEMDefault, "RW", "LW"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 6) + }) + + Convey("Filter involving an AND", t, func() { + qry := Search("oilers").Filter( + Filter().And( + Filter().Terms("pos", TEMDefault, "LW"), + Filter().Exists("PIM"), + ), + ) + + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("Filtering filter results", t, func() { + qry := Search("oilers").Filter( + Filter().Terms("pos", TEMDefault, "LW"), + ) + qry.Filter( + Filter().Exists("PIM"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("Filter involving OR", t, func() { + qry := Search("oilers").Filter( + Filter().Or( + Filter().Terms("pos", TEMDefault, "G"), + Filter().Range("goals", nil, 80, nil, nil, ""), + ), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 3) + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/searchhighlight_test.go b/vendor/github.com/mattbaird/elastigo/lib/searchhighlight_test.go new file mode 100644 index 0000000000..ca5b9304d8 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/searchhighlight_test.go @@ -0,0 +1,67 @@ +package elastigo + +import ( + "github.com/bmizerany/assert" + "testing" +) + +func TestEmbedDsl(t *testing.T) { + highlight := NewHighlight().SetOptions(NewHighlightOpts(). + Tags("<div>", "</div>"). + BoundaryChars("asdf").BoundaryMaxScan(100). + FragSize(10).NumFrags(50). + Order("order").Type("fdsa"). + MatchedFields("1", "2")) + + actual, err := GetJson(highlight) + + assert.Equal(t, nil, err) + assert.Equal(t, "<div>", actual["pre_tags"].([]interface{})[0]) + assert.Equal(t, "</div>", actual["post_tags"].([]interface{})[0]) + assert.Equal(t, "asdf", actual["boundary_chars"]) + assert.Equal(t, float64(100), actual["boundary_max_scan"]) + assert.Equal(t, float64(10), actual["fragment_size"]) + assert.Equal(t, float64(50), actual["number_of_fragments"]) + assert.Equal(t, "1", actual["matched_fields"].([]interface{})[0]) + assert.Equal(t, "2", actual["matched_fields"].([]interface{})[1]) + assert.Equal(t, "order", actual["order"]) + assert.Equal(t, "fdsa", actual["type"]) +} + +func TestFieldDsl(t *testing.T) { + highlight := NewHighlight().AddField("whatever", NewHighlightOpts(). + Tags("<div>", "</div>"). + BoundaryChars("asdf").BoundaryMaxScan(100). + FragSize(10).NumFrags(50). + Order("order").Type("fdsa"). + MatchedFields("1", "2")) + + result, err := GetJson(highlight) + actual := result["fields"].(map[string]interface{})["whatever"].(map[string]interface{}) + + assert.Equal(t, nil, err) + assert.Equal(t, "<div>", actual["pre_tags"].([]interface{})[0]) + assert.Equal(t, "</div>", actual["post_tags"].([]interface{})[0]) + assert.Equal(t, "asdf", actual["boundary_chars"]) + assert.Equal(t, float64(100), actual["boundary_max_scan"]) + assert.Equal(t, float64(10), actual["fragment_size"]) + assert.Equal(t, float64(50), actual["number_of_fragments"]) + assert.Equal(t, "1", actual["matched_fields"].([]interface{})[0]) + assert.Equal(t, "2", actual["matched_fields"].([]interface{})[1]) + assert.Equal(t, "order", actual["order"]) + assert.Equal(t, "fdsa", actual["type"]) +} + +func TestEmbedAndFieldDsl(t *testing.T) { + highlight := NewHighlight(). + SetOptions(NewHighlightOpts().Tags("<div>", "</div>")). + AddField("afield", NewHighlightOpts().Type("something")) + + actual, err := GetJson(highlight) + actualField := actual["fields"].(map[string]interface{})["afield"].(map[string]interface{}) + + assert.Equal(t, nil, err) + assert.Equal(t, "<div>", actual["pre_tags"].([]interface{})[0]) + assert.Equal(t, "</div>", actual["post_tags"].([]interface{})[0]) + assert.Equal(t, "something", actualField["type"]) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/searchsearch_test.go b/vendor/github.com/mattbaird/elastigo/lib/searchsearch_test.go new file mode 100644 index 0000000000..81ffbdf8f1 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/searchsearch_test.go @@ -0,0 +1,307 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package elastigo + +import ( + "github.com/araddon/gou" + . "github.com/smartystreets/goconvey/convey" + "testing" +) + +func TestSearch(t *testing.T) { + + c := NewTestConn() + PopulateTestDB(t, c) + defer TearDownTestDB(c) + + Convey("Wildcard request query", t, func() { + + qry := map[string]interface{}{ + "query": map[string]interface{}{ + "wildcard": map[string]string{"name": "*hu*"}, + }, + } + out, err := c.Search("oilers", "", nil, qry) + + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 3) + }) + + Convey("Simple search", t, func() { + + // searching without faceting + qry := Search("oilers").Pretty().Query( + Query().Search("dave"), + ) + + // how many different docs used the word "dave" + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 2) + + out, _ = Search("oilers").Search("dave").Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("URL Request query string", t, func() { + + out, err := c.SearchUri("oilers", "", map[string]interface{}{"q": "pos:LW"}) + + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits, ShouldNotBeNil) + So(out.Hits.Total, ShouldEqual, 3) + }) + + // A faceted search for what "type" of events there are + // - since we are not specifying an elasticsearch type it searches all () + // + // { + // "terms" : { + // "_type" : "terms", + // "missing" : 0, + // "total" : 7561, + // "other" : 0, + // "terms" : [ { + // "term" : "pushevent", + // "count" : 4185 + // }, { + // "term" : "createevent", + // "count" : 786 + // }.....]
+ // } + // } + + Convey("Facet search simple", t, func() { + + qry := Search("oilers").Pretty().Facet( + Facet().Fields("teams").Size("4"), + ).Query( + Query().All(), + ).Size("1") + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 37) + So(h.Int("teams.missing"), ShouldEqual, 0) + So(len(h.List("teams.terms")), ShouldEqual, 4) + + // change the size + qry.FacetVal.Size("20") + out, err = qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h = gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 37) + So(len(h.List("teams.terms")), ShouldEqual, 11) + + }) + + Convey("Facet search with type", t, func() { + + out, err := Search("oilers").Type("heyday").Pretty().Facet( + Facet().Fields("teams").Size("4"), + ).Query( + Query().All(), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 37) + So(len(h.List("teams.terms")), ShouldEqual, 4) + }) + + Convey("Facet search with wildcard", t, func() { + + qry := Search("oilers").Pretty().Facet( + Facet().Fields("teams").Size("20"), + ).Query( + Query().Search("*w*"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 20) + So(len(h.List("teams.terms")), ShouldEqual, 7) + }) + + Convey("Facet search with range", t, func() { + + qry := Search("oilers").Pretty().Facet( + Facet().Fields("teams").Size("20"), + ).Query( + Query().Range( + Filter().Range("dob", "19600101", nil, "19621231", nil, ""), + ).Search("*w*"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + + h := gou.NewJsonHelper(out.Facets) + So(h.Int("teams.total"), ShouldEqual, 12) + So(len(h.List("teams.terms")), ShouldEqual, 5) + }) + + Convey("Search query with terms", t, func() { + + qry := Search("oilers").Query( + Query().Term("teams", "NYR"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 4) + So(out.Hits.Total, ShouldEqual, 4) + }) + + Convey("Search query with fields", t, func() { + + qry := Search("oilers").Query( + Query().Fields("teams", "NYR", "", ""), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 4) + So(out.Hits.Total, ShouldEqual, 4) + }) + + Convey("Search query with fields exist and missing", t, func() { + + qry := Search("oilers").Filter( + Filter().Exists("PIM"), + ) + out, err := qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 2) + So(out.Hits.Total, ShouldEqual, 2) + + qry = Search("oilers").Filter( + Filter().Missing("PIM"), + ) + out, err = qry.Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 12) + }) + + Convey("Search with query and filter", t, func() { + + out, err := Search("oilers").Size("25").Query( + Query().Fields("name", "*d*", "", ""), + ).Filter( + Filter().Terms("teams", TEMDefault, "STL"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 2) + So(out.Hits.Total, ShouldEqual, 2) + }) + + Convey("Search with range", t, func() { + + out, err := Search("oilers").Size("25").Query( + Query().Range( + Filter().Range("dob", "19600101", nil, "19621231", nil, ""), + ).Search("*w*"), + 
).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 4) + So(out.Hits.Total, ShouldEqual, 4) + }) + + Convey("Search with sorting desc", t, func() { + + out, err := Search("oilers").Pretty().Query( + Query().All(), + ).Sort( + Sort("dob").Desc(), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 14) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.String("name"), ShouldEqual, "Grant Fuhr") + }) + + Convey("Search with sorting asc", t, func() { + + out, err := Search("oilers").Pretty().Query( + Query().All(), + ).Sort( + Sort("dob"), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 14) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.String("name"), ShouldEqual, "Pat Hughes") + }) + + Convey("Search with sorting desc with query", t, func() { + + out, err := Search("oilers").Pretty().Query( + Query().Search("*w*"), + ).Sort( + Sort("dob").Desc(), + ).Result(c) + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 8) + So(out.Hits.Total, ShouldEqual, 8) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.String("name"), ShouldEqual, "Wayne Gretzky") + }) + + Convey("Search query with filtered source fields", t, func() { + + qry := Search("oilers").SourceFields("name", "goals").Pretty().Query( + Query().All(), + ) + out, err := qry.Result(c) + + So(err, ShouldBeNil) + So(out, ShouldNotBeNil) + So(out.Hits.Len(), ShouldEqual, 10) + So(out.Hits.Total, ShouldEqual, 14) + + b, err := out.Hits.Hits[0].Source.MarshalJSON() + h1 := gou.NewJsonHelper(b) + So(h1.Keys(), ShouldContain, "name") + So(h1.Keys(), ShouldContain, "goals") + }) +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/setup_test.go b/vendor/github.com/mattbaird/elastigo/lib/setup_test.go new file mode 100644 index 0000000000..026f2dc54b --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/setup_test.go @@ -0,0 +1,84 @@ +package elastigo + +import ( + "testing" + "time" +) + +/* +// elastigo Conn adapter to avoid a circular dependency +type conn interface { + CreateIndex(name string) (interface{}, error) + DeleteIndex(name string) (interface{}, error) + + Index(index string, _type string, id string, args map[string]interface{}, data interface{}) (interface{}, error) +} +*/ + +func newIndexWorker(c *Conn, t *testing.T) func(interface{}) { + + return func(d interface{}) { + _, err := c.Index("oilers", "heyday", "", nil, d) + if err != nil { + t.Fatalf("Index failed: %s", err) + } + } +} + +func PopulateTestDB(t *testing.T, c *Conn) { + + // it is not technically necessary to create an index here + _, err := c.CreateIndex("oilers") + if err != nil { + t.Fatal("Error in CreateIndex", err) + } + + // set the mapping for dob to be a date so it can be used for range searches + _, err = c.DoCommand("PUT", "/oilers/heyday/_mapping?ignore_conflicts", nil, + string(`{"heyday": {"properties": { + "dob": {"type": "date", "format": "basic_date"}, + "pos": {"type": "string", "index": "not_analyzed"}, + "teams": {"type": "string", "index": "not_analyzed"} + }}}`)) + if err != nil { + t.Fatal("Error setting dob mapping", err) + } + + idx := newIndexWorker(c, t) + + idx(`{"name": "Mark Messier", "jersey": 11, "pos": "LW", "goals": 37, "PIM": 165, + "dob": "19610118", "teams": ["EDM", 
"NYR", "VAN"]}`) + idx(`{"name": "Wayne Gretzky", "jersey": 99, "pos": "C", "goals": 87, + "dob": "19610126", "teams": ["EDM", "NYR", "STL"]}`) + idx(`{"name": "Paul Coffey", "jersey": 7, "pos": "D", "goals": 40, + "dob": "19610601", "teams": ["EDM", "DET"]}`) + idx(`{"name": "Jari Kurri", "jersey": 17, "pos": "RW", "goals": 52, + "dob": "19600518", "teams": ["EDM", "VAN"]}`) + idx(`{"name": "Glenn Anderson", "jersey": 9, "pos": "RW", "goals": 54, + "dob": "19601002", "teams": ["EDM", "NYR", "TOR", "STL"]}`) + idx(`{"name": "Ken Linseman", "jersey": 13, "pos": "C", "goals": 18, + "dob": "19580811", "teams": ["EDM", "TOR"]}`) + idx(`{"name": "Pat Hughes", "jersey": 16, "pos": "RW", "goals": 27, + "dob": "19550325", "teams": ["EDM", "MTL", "PIT"]}`) + idx(`{"name": "Dave Hunter", "jersey": 12, "pos": "LW", "goals": 22, + "dob": "19580101", "teams": ["EDM", "PIT"]}`) + idx(`{"name": "Kevin Lowe", "jersey": 4, "pos": "D", "goals": 4, + "dob": "19590415", "teams": ["EDM", "NYR"]}`) + idx(`{"name": "Charlie Huddy", "jersey": 22, "pos": "D", "goals": 8, + "dob": "19590602", "teams": ["EDM", "BUF", "STL"]}`) + idx(`{"name": "Randy Gregg", "jersey": 21, "pos": "D", "goals": 13, + "dob": "19560219", "teams": ["EDM", "VAN"]}`) + idx(`{"name": "Dave Semenko", "jersey": 27, "pos": "LW", "goals": 4, "PIM": 118, + "dob": "19570712", "teams": ["EDM"]}`) + idx(`{"name": "Grant Fuhr", "jersey": 31, "pos": "G", "GAA": 3.91, + "dob": "19620928", "teams": ["EDM", "TOR", "BUF", "STL"]}`) + idx(`{"name": "Andy Moog", "jersey": 35, "pos": "G", "GAA": 3.77, + "dob": "19600218", "teams": ["EDM", "BOS", "DAL", "MTL"]}`) + + // HACK to let the ES magic happen + time.Sleep(time.Second) +} + +func TearDownTestDB(c *Conn) { + c.DeleteIndex("oilers") +} diff --git a/vendor/github.com/mattbaird/elastigo/lib/shared_test.go b/vendor/github.com/mattbaird/elastigo/lib/shared_test.go new file mode 100644 index 0000000000..4def7a63f9 --- /dev/null +++ b/vendor/github.com/mattbaird/elastigo/lib/shared_test.go @@ -0,0 +1,43 @@ +// Copyright 2013 Matthew Baird +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elastigo + +import ( + "encoding/json" + "flag" + "log" +) + +var ( + _ = log.Ldate + eshost *string = flag.String("host", "localhost", "Elasticsearch Server Host Address") + logLevel *string = flag.String("logging", "info", "Which log level: [debug,info,warn,error,fatal]") +) + +func GetJson(input interface{}) (map[string]interface{}, error) { + var result map[string]interface{} + bytes, err := json.Marshal(input) + + if err == nil { + err = json.Unmarshal(bytes, &result) + } + + return result, err +} + +func HasKey(input map[string]interface{}, key string) bool { + if _, ok := input[key]; ok { + return true + } + + return false +} diff --git a/vendor/github.com/metrics20/go-metrics20/README.md b/vendor/github.com/metrics20/go-metrics20/README.md new file mode 100644 index 0000000000..cc130278c2 --- /dev/null +++ b/vendor/github.com/metrics20/go-metrics20/README.md @@ -0,0 +1,8 @@ +## go-metrics20 +helper functions to detect metric format and convey operations by modifying tag data. + + +## supported implementations +* graphite (in both legacy (~statsd) and carbon 2.0 format) +* later maybe more for other systems / structures / protocols ? + diff --git a/vendor/github.com/metrics20/go-metrics20/carbon20/manipulate_test.go b/vendor/github.com/metrics20/go-metrics20/carbon20/manipulate_test.go new file mode 100644 index 0000000000..30080a0915 --- /dev/null +++ b/vendor/github.com/metrics20/go-metrics20/carbon20/manipulate_test.go @@ -0,0 +1,106 @@ +package carbon20 + +import ( + "github.com/bmizerany/assert" + "strings" + "testing" +) + +var out string + +type Case struct { + in string + p1 string + p2 string + p2ne string + out string +} + +func TestDeriveCount(t *testing.T) { + cases := []Case{ + // metrics 2.0 cases with equals + Case{"foo.bar.unit=yes.baz", "prefix.", "", "ignored", "foo.bar.unit=yesps.baz"}, + Case{"foo.bar.unit=yes", "prefix.", "our=prefix.", "ignored", "our=prefix.foo.bar.unit=yesps"}, + Case{"unit=yes.foo.bar", "prefix.", "", "ignored", "unit=yesps.foo.bar"}, + Case{"mtype=count.foo.unit=ok.bar", "prefix.", "", "ignored", "mtype=rate.foo.unit=okps.bar"}, + + // metrics 2.0 cases without equals + Case{"foo.bar.unit_is_yes.baz", "prefix.", "ignored", "", "foo.bar.unit_is_yesps.baz"}, + Case{"foo.bar.unit_is_yes", "prefix.", "ignored", "our_is_prefix.", "our_is_prefix.foo.bar.unit_is_yesps"}, + Case{"unit_is_yes.foo.bar", "prefix.", "ignored", "", "unit_is_yesps.foo.bar"}, + Case{"mtype_is_count.foo.unit_is_ok.bar", "prefix.", "ignored", "", "mtype_is_rate.foo.unit_is_okps.bar"}, + } + for _, c := range cases { + assert.Equal(t, DeriveCount(c.in, c.p1, c.p2, c.p2ne, false), c.out) + } +} + +// only 1 kind of stat is enough, cause they all behave the same +func TestStat(t *testing.T) { + cases := []Case{ + // metrics 2.0 cases with equals + Case{"foo.bar.unit=yes.baz", "prefix.", "", "ignored", "foo.bar.unit=yes.baz.stat=max_90"}, + Case{"foo.bar.unit=yes", "prefix.", "our=prefix.", "ignored", "our=prefix.foo.bar.unit=yes.stat=max_90"}, + Case{"unit=yes.foo.bar", "prefix.", "", "ignored", "unit=yes.foo.bar.stat=max_90"}, + Case{"mtype=count.foo.unit=ok.bar", "prefix.", "", "ignored", "mtype=count.foo.unit=ok.bar.stat=max_90"}, + // metrics 2.0 cases without equals + Case{"foo.bar.unit_is_yes.baz", "prefix.", "ignored", "", "foo.bar.unit_is_yes.baz.stat_is_max_90"}, + Case{"foo.bar.unit_is_yes", "prefix.", "ignored", "our_is_prefix.", "our_is_prefix.foo.bar.unit_is_yes.stat_is_max_90"}, + Case{"unit_is_yes.foo.bar", "prefix.", "ignored", "", 
"unit_is_yes.foo.bar.stat_is_max_90"}, + Case{"mtype_is_count.foo.unit_is_ok.bar", "prefix.", "ignored", "", "mtype_is_count.foo.unit_is_ok.bar.stat_is_max_90"}, + } + for _, c := range cases { + assert.Equal(t, Max(c.in, c.p1, c.p2, c.p2ne, "90", ""), c.out) + } + // same but without percentile + for i, c := range cases { + cases[i].out = strings.Replace(c.out, "max_90", "max", 1) + } + for _, c := range cases { + assert.Equal(t, Max(c.in, c.p1, c.p2, c.p2ne, "", ""), c.out) + } +} +func TestRateCountPckt(t *testing.T) { + cases := []Case{ + // metrics 2.0 cases with equals + Case{"foo.bar.unit=yes.baz", "prefix.", "", "ignored", "foo.bar.unit=Pckt.baz.orig_unit=yes.pckt_type=sent.direction=in"}, + Case{"foo.bar.unit=yes", "prefix.", "our=prefix.", "ignored", "our=prefix.foo.bar.unit=Pckt.orig_unit=yes.pckt_type=sent.direction=in"}, + Case{"unit=yes.foo.bar", "prefix.", "", "ignored", "unit=Pckt.foo.bar.orig_unit=yes.pckt_type=sent.direction=in"}, + Case{"mtype=count.foo.unit=ok.bar", "prefix.", "", "ignored", "mtype=count.foo.unit=Pckt.bar.orig_unit=ok.pckt_type=sent.direction=in"}, + // metrics 2.0 cases without equals + Case{"foo.bar.unit_is_yes.baz", "prefix.", "ignored", "", "foo.bar.unit_is_Pckt.baz.orig_unit_is_yes.pckt_type_is_sent.direction_is_in"}, + Case{"foo.bar.unit_is_yes", "prefix.", "ignored", "our_is_prefix.", "our_is_prefix.foo.bar.unit_is_Pckt.orig_unit_is_yes.pckt_type_is_sent.direction_is_in"}, + Case{"unit_is_yes.foo.bar", "prefix.", "ignored", "", "unit_is_Pckt.foo.bar.orig_unit_is_yes.pckt_type_is_sent.direction_is_in"}, + Case{"mtype_is_count.foo.unit_is_ok.bar", "prefix.", "ignored", "", "mtype_is_count.foo.unit_is_Pckt.bar.orig_unit_is_ok.pckt_type_is_sent.direction_is_in"}, + } + for _, c := range cases { + assert.Equal(t, CountPckt(c.in, c.p1, c.p2, c.p2ne), c.out) + c.out = strings.Replace(strings.Replace(c.out, "unit=Pckt", "unit=Pcktps", -1), "mtype=count", "mtype=rate", -1) + c.out = strings.Replace(strings.Replace(c.out, "unit_is_Pckt", "unit_is_Pcktps", -1), "mtype_is_count", "mtype_is_rate", -1) + assert.Equal(t, RatePckt(c.in, c.p1, c.p2, c.p2ne), c.out) + } +} + +func BenchmarkDeriveCountsM20Bare(b *testing.B) { + for i := 0; i < b.N; i++ { + out = DeriveCount("foo=bar", "prefix-m1.", "prefix-m2.", "prefix-m2ne.", false) + } +} + +func BenchmarkDeriveCountsM20Proper(b *testing.B) { + for i := 0; i < b.N; i++ { + out = DeriveCount("foo=bar.unit=yes.mtype=count", "prefix-m1.", "prefix-m2.", "prefix-m2ne.", false) + } +} + +func BenchmarkDeriveCountsM20NoEqualsBare(b *testing.B) { + for i := 0; i < b.N; i++ { + out = DeriveCount("foo_is_bar", "prefix-m1.", "prefix-m2.", "prefix-m2ne.", false) + } +} + +func BenchmarkDeriveCountsM20NoEqualsProper(b *testing.B) { + for i := 0; i < b.N; i++ { + out = DeriveCount("foo_is_bar.unit_is_yes.mtype_is_count", "prefix-m1.", "prefix-m2.", "prefix-m2ne.", false) + } +} diff --git a/vendor/github.com/metrics20/go-metrics20/carbon20/validate_test.go b/vendor/github.com/metrics20/go-metrics20/carbon20/validate_test.go new file mode 100644 index 0000000000..9c9db3eff0 --- /dev/null +++ b/vendor/github.com/metrics20/go-metrics20/carbon20/validate_test.go @@ -0,0 +1,148 @@ +package carbon20 + +import ( + "testing" + "math" + + "github.com/bmizerany/assert" +) + +func TestValidateLegacy(t *testing.T) { + cases := []struct { + in string + level ValidationLevelLegacy + valid bool + }{ + {"foo.bar", StrictLegacy, true}, + {"foo.bar", MediumLegacy, true}, + {"foo.bar", NoneLegacy, true}, + {"foo..bar", StrictLegacy, false}, + 
{"foo..bar", MediumLegacy, true}, + {"foo..bar", NoneLegacy, true}, + {"foo..bar.ba::z", StrictLegacy, false}, + {"foo..bar.ba::z", MediumLegacy, true}, + {"foo..bar.ba::z", NoneLegacy, true}, + {"foo..bar.b\xbdz", StrictLegacy, false}, + {"foo..bar.b\xbdz", MediumLegacy, false}, + {"foo..bar.b\xbdz", NoneLegacy, true}, + {"foo..bar.b\x00z", StrictLegacy, false}, + {"foo..bar.b\x00z", MediumLegacy, false}, + {"foo..bar.b\x00z", NoneLegacy, true}, + } + for _, c := range cases { + assert.Equal(t, ValidateKeyLegacy(c.in, c.level) == nil, c.valid) + assert.Equal(t, ValidateKeyLegacyB([]byte(c.in), c.level) == nil, c.valid) + } +} + +func TestValidateTimestamps(t *testing.T) { + cases := []struct { + in string + valid bool + }{ + {"foo.bar 1 123", true}, + {"foo.bar 1 123.0", true}, + {"foo.bar 1 123abc", false}, + } + + for _, c := range cases { + _, _, _, err := ValidatePacket([]byte(c.in), NoneLegacy, NoneM20) + valid := err == nil + if valid != c.valid { + t.Errorf("in='%s' valid=%v, expected %v", c.in, valid, c.valid) + } + } +} + +func TestValidateValues(t *testing.T) { + cases := []struct { + in string + value float64 + valid bool + }{ + {"foo.bar -1 123", -1, true}, + {"foo.bar 1e5 123", 1e5, true}, + {"foo.bar 1E+5 123", 1e5, true}, + {"foo.bar +1E-5 123", 1e-5, true}, + {"foo.bar 1e100 123", 1e100, true}, + {"foo.bar z1 123", 0, false}, + {"foo.bar ++1 123", 0, false}, + } + + for _, c := range cases { + _, val, _, err := ValidatePacket([]byte(c.in), NoneLegacy, NoneM20) + valid := err == nil + if valid != c.valid { + t.Errorf("in='%s' valid=%v, expected %v", c.in, valid, c.valid) + } + if math.Abs(val - c.value) > 0.0001 { + t.Errorf("in='%s', value=%v, expected %v", c.in, val, c.value) + } + } +} + + +func TestValidateM20(t *testing.T) { + cases := []struct { + in string + level ValidationLevelM20 + valid bool + }{ + {"foo.bar.aunit=no.baz", MediumM20, false}, + {"foo.bar.UNIT=no.baz", MediumM20, false}, + {"foo.bar.unita=no.bar", MediumM20, false}, + {"foo.bar.aunit=no.baz", NoneM20, true}, + {"foo.bar.UNIT=no.baz", NoneM20, true}, + {"foo.bar.unita=no.bar", NoneM20, true}, + } + for _, c := range cases { + assert.Equal(t, ValidateKeyM20(c.in, c.level) == nil, c.valid) + assert.Equal(t, ValidateKeyM20B([]byte(c.in), c.level) == nil, c.valid) + } +} +func TestValidateM20NoEquals(t *testing.T) { + cases := []struct { + in string + level ValidationLevelM20 + valid bool + }{ + {"foo.bar.mtype_is_count.baz", MediumM20, false}, + {"foo.bar.mtype_is_count", MediumM20, false}, + {"mtype_is_count.foo.bar", MediumM20, false}, + {"foo.bar.mtype_is_count.baz", NoneM20, true}, + {"foo.bar.mtype_is_count", NoneM20, true}, + {"mtype_is_count.foo.bar", NoneM20, true}, + } + for _, c := range cases { + assert.Equal(t, ValidateKeyM20NoEquals(c.in, c.level) == nil, c.valid) + assert.Equal(t, ValidateKeyM20NoEqualsB([]byte(c.in), c.level) == nil, c.valid) + } +} + +func BenchmarkValidatePacketNone(b *testing.B) { + in := []byte("carbon.agents.foo.cache.overflow 123.456 1234567890") + for i := 0; i < b.N; i++ { + _, _, _, err := ValidatePacket(in, NoneLegacy, NoneM20) + if err != nil { + panic(err) + } + } +} +func BenchmarkValidatePacketMedium(b *testing.B) { + in := []byte("carbon.agents.foo.cache.overflow 123.456 1234567890") + for i := 0; i < b.N; i++ { + _, _, _, err := ValidatePacket(in, MediumLegacy, NoneM20) + if err != nil { + panic(err) + } + } +} +func BenchmarkValidatePacketStrict(b *testing.B) { + in := []byte("carbon.agents.foo.cache.overflow 123.456 1234567890") + for i := 0; i < b.N; 
i++ { + _, _, _, err := ValidatePacket(in, StrictLegacy, NoneM20) + if err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/metrics20/go-metrics20/carbon20/version_test.go b/vendor/github.com/metrics20/go-metrics20/carbon20/version_test.go new file mode 100644 index 0000000000..67f7a4dedf --- /dev/null +++ b/vendor/github.com/metrics20/go-metrics20/carbon20/version_test.go @@ -0,0 +1,99 @@ +package carbon20 + +import ( + "testing" + + "github.com/bmizerany/assert" +) + +var version metricVersion + +func TestValidate(t *testing.T) { + cases := []struct { + in string + version metricVersion + }{ + {"foo.bar", Legacy}, + {"foo.bar", Legacy}, + {"foo.bar", Legacy}, + {"foo..bar", Legacy}, + {"foo..bar", Legacy}, + {"foo..bar", Legacy}, + {"foo..bar.ba::z", Legacy}, + {"foo..bar.ba::z", Legacy}, + {"foo..bar.ba::z", Legacy}, + {"foo..bar.b\xbdz", Legacy}, + {"foo..bar.b\xbdz", Legacy}, + {"foo..bar.b\xbdz", Legacy}, + {"foo..bar.b\x00z", Legacy}, + {"foo..bar.b\x00z", Legacy}, + {"foo..bar.b\x00z", Legacy}, + {"foo.bar.aunit=no.baz", M20}, + {"foo.bar.UNIT=no.baz", M20}, + {"foo.bar.unita=no.bar", M20}, + {"foo.bar.mtype_is_count.baz", M20NoEquals}, + {"foo.bar.mtype_is_count", M20NoEquals}, + {"mtype_is_count.foo.bar", M20NoEquals}, + } + for _, c := range cases { + version := GetVersion(c.in) + assert.Equal(t, c.version, version) + } +} + +func TestGetVersionB(t *testing.T) { + cases := []struct { + in []byte + v metricVersion + }{ + { + []byte("service=carbon.instance=foo.unit=Err.mtype=gauge.type=cache_overflow"), + M20, + }, + { + []byte("service_is_carbon.instance_is_foo.unit_is_Err.mtype_is_gauge.type_is_cache_overflow"), + M20NoEquals, + }, + { + []byte("carbon.agents.foo.cache.overflow"), + Legacy, + }, + { + []byte("foo-bar"), + Legacy, + }, + } + for i, c := range cases { + v := GetVersionB(c.in) + if v != c.v { + t.Fatalf("case %d: expected %s, got %s", i, c.v, v) + } + } +} + +func BenchmarkGetVersionBM20(b *testing.B) { + in := []byte("service=carbon.instance=foo.unit=Err.mtype=gauge.type=cache_overflow") + var v metricVersion + for i := 0; i < b.N; i++ { + v = GetVersionB(in) + } + version = v +} + +func BenchmarkGetVersionBM20NoEquals(b *testing.B) { + in := []byte("service_is_carbon.instance_is_foo.unit_is_Err.mtype_is_gauge.type_is_cache_overflow") + var v metricVersion + for i := 0; i < b.N; i++ { + v = GetVersionB(in) + } + version = v +} + +func BenchmarkGetVersionBLegacy(b *testing.B) { + in := []byte("carbon.agents.foo.cache.overflow") + var v metricVersion + for i := 0; i < b.N; i++ { + v = GetVersionB(in) + } + version = v +} diff --git a/vendor/github.com/miekg/dns/.gitignore b/vendor/github.com/miekg/dns/.gitignore new file mode 100644 index 0000000000..776cd950c2 --- /dev/null +++ b/vendor/github.com/miekg/dns/.gitignore @@ -0,0 +1,4 @@ +*.6 +tags +test.out +a.out diff --git a/vendor/github.com/miekg/dns/.travis.yml b/vendor/github.com/miekg/dns/.travis.yml new file mode 100644 index 0000000000..1f056ab7cc --- /dev/null +++ b/vendor/github.com/miekg/dns/.travis.yml @@ -0,0 +1,7 @@ +language: go +sudo: false +go: + - 1.5 + - 1.6 +script: + - go test -race -v -bench=. 
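Note on the go-metrics20 API exercised by the vendored tests above: GetVersion / GetVersionB classify a metric key as Legacy (plain graphite), M20 (metrics 2.0 with key=value tags), or M20NoEquals (metrics 2.0 with key_is_value tags), and the manipulate functions rewrite tags accordingly. A minimal sketch of an importing program, inferred only from the vendored tests (the sample keys are taken from those tests; the program itself is illustrative and not part of this patch):

    package main

    import (
    	"fmt"

    	"github.com/metrics20/go-metrics20/carbon20"
    )

    func main() {
    	keys := []string{
    		"carbon.agents.foo.cache.overflow",                                                    // legacy graphite key
    		"service=carbon.instance=foo.unit=Err.mtype=gauge.type=cache_overflow",                // metrics 2.0, '=' tags
    		"service_is_carbon.instance_is_foo.unit_is_Err.mtype_is_gauge.type_is_cache_overflow", // metrics 2.0, '_is_' tags
    	}
    	for _, key := range keys {
    		// GetVersion returns one of the exported values Legacy, M20, M20NoEquals.
    		switch carbon20.GetVersion(key) {
    		case carbon20.Legacy:
    			fmt.Println(key, "-> Legacy")
    		case carbon20.M20:
    			fmt.Println(key, "-> M20")
    		case carbon20.M20NoEquals:
    			fmt.Println(key, "-> M20NoEquals")
    		}
    	}
    }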
diff --git a/vendor/github.com/miekg/dns/client_test.go b/vendor/github.com/miekg/dns/client_test.go new file mode 100644 index 0000000000..f3d229c4c1 --- /dev/null +++ b/vendor/github.com/miekg/dns/client_test.go @@ -0,0 +1,469 @@ +package dns + +import ( + "crypto/tls" + "fmt" + "net" + "strconv" + "testing" + "time" +) + +func TestClientSync(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + c := new(Client) + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get a valid answer\n%v", r) + } + // And now with plain Exchange(). + r, err = Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r == nil || r.Rcode != RcodeSuccess { + t.Errorf("failed to get a valid answer\n%v", r) + } +} + +func TestClientTLSSync(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) + if err != nil { + t.Fatalf("unable to build certificate: %v", err) + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + c := new(Client) + c.Net = "tcp-tls" + c.TLSConfig = &tls.Config{ + InsecureSkipVerify: true, + } + + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get a valid answer\n%v", r) + } +} + +func TestClientSyncBadId(t *testing.T) { + HandleFunc("miek.nl.", HelloServerBadId) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + c := new(Client) + if _, _, err := c.Exchange(m, addrstr); err != ErrId { + t.Errorf("did not find a bad Id") + } + // And now with plain Exchange(). + if _, err := Exchange(m, addrstr); err != ErrId { + t.Errorf("did not find a bad Id") + } +} + +func TestClientEDNS0(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeDNSKEY) + + m.SetEdns0(2048, true) + + c := new(Client) + r, _, err := c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get a valid answer\n%v", r) + } +} + +// Validates the transmission and parsing of local EDNS0 options.
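+// The test server below reflects the client's EDNS0_LOCAL options back in its reply, so the +// client side can assert that both options survive the round trip.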
+func TestClientEDNS0Local(t *testing.T) { + optStr1 := "1979:0x0707" + optStr2 := strconv.Itoa(EDNS0LOCALSTART) + ":0x0601" + + handler := func(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + + m.Extra = make([]RR, 1, 2) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello local edns"}} + + // If the local options are what we expect, then reflect them back. + ec1 := req.Extra[0].(*OPT).Option[0].(*EDNS0_LOCAL).String() + ec2 := req.Extra[0].(*OPT).Option[1].(*EDNS0_LOCAL).String() + if ec1 == optStr1 && ec2 == optStr2 { + m.Extra = append(m.Extra, req.Extra[0]) + } + + w.WriteMsg(m) + } + + HandleFunc("miek.nl.", handler) + defer HandleRemove("miek.nl.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %s", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + + // Add two local edns options to the query. + ec1 := &EDNS0_LOCAL{Code: 1979, Data: []byte{7, 7}} + ec2 := &EDNS0_LOCAL{Code: EDNS0LOCALSTART, Data: []byte{6, 1}} + o := &OPT{Hdr: RR_Header{Name: ".", Rrtype: TypeOPT}, Option: []EDNS0{ec1, ec2}} + m.Extra = append(m.Extra, o) + + c := new(Client) + r, _, e := c.Exchange(m, addrstr) + if e != nil { + t.Logf("failed to exchange: %s", e.Error()) + t.Fail() + } + + if r != nil && r.Rcode != RcodeSuccess { + t.Log("failed to get a valid answer") + t.Fail() + t.Logf("%v\n", r) + } + + txt := r.Extra[0].(*TXT).Txt[0] + if txt != "Hello local edns" { + t.Log("Unexpected result for miek.nl", txt, "!= Hello local edns") + t.Fail() + } + + // Validate the local options in the reply. + got := r.Extra[1].(*OPT).Option[0].(*EDNS0_LOCAL).String() + if got != optStr1 { + t.Logf("failed to get local edns0 answer; got %s, expected %s", got, optStr1) + t.Fail() + t.Logf("%v\n", r) + } + + got = r.Extra[1].(*OPT).Option[1].(*EDNS0_LOCAL).String() + if got != optStr2 { + t.Logf("failed to get local edns0 answer; got %s, expected %s", got, optStr2) + t.Fail() + t.Logf("%v\n", r) + } +} + +// ExampleTsigSecret_updateLeaseTSIG shows how to update a lease signed with TSIG +func ExampleTsigSecret_updateLeaseTSIG() { + m := new(Msg) + m.SetUpdate("t.local.ip6.io.") + rr, _ := NewRR("t.local.ip6.io. 30 A 127.0.0.1") + rrs := make([]RR, 1) + rrs[0] = rr + m.Insert(rrs) + + leaseRr := new(OPT) + leaseRr.Hdr.Name = "." 
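+ // The EDNS0_UL ("update lease") option rides inside this OPT pseudo-RR; the lease time in seconds is set on e.Lease below.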
+ leaseRr.Hdr.Rrtype = TypeOPT + e := new(EDNS0_UL) + e.Code = EDNS0UL + e.Lease = 120 + leaseRr.Option = append(leaseRr.Option, e) + m.Extra = append(m.Extra, leaseRr) + + c := new(Client) + m.SetTsig("polvi.", HmacMD5, 300, time.Now().Unix()) + c.TsigSecret = map[string]string{"polvi.": "pRZgBrBvI4NAHZYhxmhs/Q=="} + + _, _, err := c.Exchange(m, "127.0.0.1:53") + if err != nil { + panic(err) + } +} + +func TestClientConn(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + + // This uses TCP just to make it slightly different than TestClientSync + s, addrstr, err := RunLocalTCPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSOA) + + cn, err := Dial("tcp", addrstr) + if err != nil { + t.Errorf("failed to dial %s: %v", addrstr, err) + } + + err = cn.WriteMsg(m) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + r, err := cn.ReadMsg() + if r == nil || r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } + + err = cn.WriteMsg(m) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + h := new(Header) + buf, err := cn.ReadMsgHeader(h) + if buf == nil { + t.Errorf("failed to get an valid answer\n%v", r) + } + if int(h.Bits&0xF) != RcodeSuccess { + t.Errorf("failed to get an valid answer in ReadMsgHeader\n%v", r) + } + if h.Ancount != 0 || h.Qdcount != 1 || h.Nscount != 0 || h.Arcount != 1 { + t.Errorf("expected to have question and additional in response; got something else: %+v", h) + } + if err = r.Unpack(buf); err != nil { + t.Errorf("unable to unpack message fully: %v", err) + } +} + +func TestTruncatedMsg(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl.", TypeSRV) + cnt := 10 + for i := 0; i < cnt; i++ { + r := &SRV{ + Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeSRV, Class: ClassINET, Ttl: 0}, + Port: uint16(i + 8000), + Target: "target.miek.nl.", + } + m.Answer = append(m.Answer, r) + + re := &A{ + Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeA, Class: ClassINET, Ttl: 0}, + A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i)).To4(), + } + m.Extra = append(m.Extra, re) + } + buf, err := m.Pack() + if err != nil { + t.Errorf("failed to pack: %v", err) + } + + r := new(Msg) + if err = r.Unpack(buf); err != nil { + t.Errorf("unable to unpack message: %v", err) + } + if len(r.Answer) != cnt { + t.Logf("answer count after regular unpack doesn't match: %d", len(r.Answer)) + t.Fail() + } + if len(r.Extra) != cnt { + t.Logf("extra count after regular unpack doesn't match: %d", len(r.Extra)) + t.Fail() + } + + m.Truncated = true + buf, err = m.Pack() + if err != nil { + t.Errorf("failed to pack truncated: %v", err) + } + + r = new(Msg) + if err = r.Unpack(buf); err != nil && err != ErrTruncated { + t.Errorf("unable to unpack truncated message: %v", err) + } + if !r.Truncated { + t.Log("truncated message wasn't unpacked as truncated") + t.Fail() + } + if len(r.Answer) != cnt { + t.Logf("answer count after truncated unpack doesn't match: %d", len(r.Answer)) + t.Fail() + } + if len(r.Extra) != cnt { + t.Logf("extra count after truncated unpack doesn't match: %d", len(r.Extra)) + t.Fail() + } + + // Now we want to remove almost all of the extra records + // We're going to loop over the extra to get the count of the size of all + // of them + off := 0 + buf1 := make([]byte, m.Len()) + for i := 0; i < len(m.Extra); i++ { + off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) + if 
err != nil { + t.Errorf("failed to pack extra: %v", err) + } + } + + // Remove all of the extra bytes but 10 bytes from the end of buf + off -= 10 + buf1 = buf[:len(buf)-off] + + r = new(Msg) + if err = r.Unpack(buf1); err != nil && err != ErrTruncated { + t.Errorf("unable to unpack cutoff message: %v", err) + } + if !r.Truncated { + t.Log("truncated cutoff message wasn't unpacked as truncated") + t.Fail() + } + if len(r.Answer) != cnt { + t.Logf("answer count after cutoff unpack doesn't match: %d", len(r.Answer)) + t.Fail() + } + if len(r.Extra) != 0 { + t.Logf("extra count after cutoff unpack is not zero: %d", len(r.Extra)) + t.Fail() + } + + // Now we want to remove almost all of the answer records too + buf1 = make([]byte, m.Len()) + as := 0 + for i := 0; i < len(m.Extra); i++ { + off1 := off + off, err = PackRR(m.Extra[i], buf1, off, nil, m.Compress) + as = off - off1 + if err != nil { + t.Errorf("failed to pack extra: %v", err) + } + } + + // Keep exactly one answer left + // This should still cause Answer to be nil + off -= as + buf1 = buf[:len(buf)-off] + + r = new(Msg) + if err = r.Unpack(buf1); err != nil && err != ErrTruncated { + t.Errorf("unable to unpack cutoff message: %v", err) + } + if !r.Truncated { + t.Log("truncated cutoff message wasn't unpacked as truncated") + t.Fail() + } + if len(r.Answer) != 0 { + t.Logf("answer count after second cutoff unpack is not zero: %d", len(r.Answer)) + t.Fail() + } + + // Now leave only 1 byte of the question + // Since the header is always 12 bytes, we just need to keep 13 + buf1 = buf[:13] + + r = new(Msg) + err = r.Unpack(buf1) + if err == nil || err == ErrTruncated { + t.Logf("error should not be ErrTruncated from question cutoff unpack: %v", err) + t.Fail() + } + + // Finally, if we only have the header, we should still return an error + buf1 = buf[:12] + + r = new(Msg) + if err = r.Unpack(buf1); err == nil || err != ErrTruncated { + t.Logf("error not ErrTruncated from header-only unpack: %v", err) + t.Fail() + } +} + +func TestTimeout(t *testing.T) { + // Set up a dummy UDP server that won't respond + addr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0") + if err != nil { + t.Fatalf("unable to resolve local udp address: %v", err) + } + conn, err := net.ListenUDP("udp", addr) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer conn.Close() + addrstr := conn.LocalAddr().String() + + // Message to send + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + + // Use a channel + timeout to ensure we don't get stuck if the + // Client Timeout is not working properly + done := make(chan struct{}) + + timeout := time.Millisecond + allowable := timeout + (10 * time.Millisecond) + abortAfter := timeout + (100 * time.Millisecond) + + start := time.Now() + + go func() { + c := &Client{Timeout: timeout} + _, _, err := c.Exchange(m, addrstr) + if err == nil { + t.Error("no timeout using Client") + } + done <- struct{}{} + }() + + select { + case <-done: + case <-time.After(abortAfter): + } + + length := time.Since(start) + + if length > allowable { + t.Errorf("exchange took longer (%v) than specified Timeout (%v)", length, timeout) + } +} diff --git a/vendor/github.com/miekg/dns/clientconfig_test.go b/vendor/github.com/miekg/dns/clientconfig_test.go new file mode 100644 index 0000000000..63bc5c814b --- /dev/null +++ b/vendor/github.com/miekg/dns/clientconfig_test.go @@ -0,0 +1,50 @@ +package dns + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +const normal string = ` +# Comment +domain 
somedomain.com +nameserver 10.28.10.2 +nameserver 11.28.10.1 +` + +const missingNewline string = ` +domain somedomain.com +nameserver 10.28.10.2 +nameserver 11.28.10.1` // <- NOTE: NO newline. + +func testConfig(t *testing.T, data string) { + tempDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("tempDir: %v", err) + } + defer os.RemoveAll(tempDir) + + path := filepath.Join(tempDir, "resolv.conf") + if err := ioutil.WriteFile(path, []byte(data), 0644); err != nil { + t.Fatalf("writeFile: %v", err) + } + cc, err := ClientConfigFromFile(path) + if err != nil { + t.Errorf("error parsing resolv.conf: %v", err) + } + if l := len(cc.Servers); l != 2 { + t.Errorf("incorrect number of nameservers detected: %d", l) + } + if l := len(cc.Search); l != 1 { + t.Errorf("domain directive not parsed correctly: %v", cc.Search) + } else { + if cc.Search[0] != "somedomain.com" { + t.Errorf("domain is unexpected: %v", cc.Search[0]) + } + } +} + +func TestNameserver(t *testing.T) { testConfig(t, normal) } +func TestMissingFinalNewLine(t *testing.T) { testConfig(t, missingNewline) } diff --git a/vendor/github.com/miekg/dns/dns_bench_test.go b/vendor/github.com/miekg/dns/dns_bench_test.go new file mode 100644 index 0000000000..bccc3d5404 --- /dev/null +++ b/vendor/github.com/miekg/dns/dns_bench_test.go @@ -0,0 +1,211 @@ +package dns + +import ( + "net" + "testing" +) + +func BenchmarkMsgLength(b *testing.B) { + b.StopTimer() + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + b.StartTimer() + for i := 0; i < b.N; i++ { + msg.Len() + } +} + +func BenchmarkMsgLengthPack(b *testing.B) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = msg.Pack() + } +} + +func BenchmarkPackDomainName(b *testing.B) { + name1 := "12345678901234567890123456789012345.12345678.123." + buf := make([]byte, len(name1)+1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackDomainName(name1, buf, 0, nil, false) + } +} + +func BenchmarkUnpackDomainName(b *testing.B) { + name1 := "12345678901234567890123456789012345.12345678.123." + buf := make([]byte, len(name1)+1) + _, _ = PackDomainName(name1, buf, 0, nil, false) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackDomainName(buf, 0) + } +} + +func BenchmarkUnpackDomainNameUnprintable(b *testing.B) { + name1 := "\x02\x02\x02\x025\x02\x02\x02\x02.12345678.123." + buf := make([]byte, len(name1)+1) + _, _ = PackDomainName(name1, buf, 0, nil, false) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackDomainName(buf, 0) + } +} + +func BenchmarkCopy(b *testing.B) { + b.ReportAllocs() + m := new(Msg) + m.SetQuestion("miek.nl.", TypeA) + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Answer = []RR{rr} + rr, _ = NewRR("miek.nl. 
2311 IN NS 127.0.0.1") + m.Ns = []RR{rr} + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Extra = []RR{rr} + + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Copy() + } +} + +func BenchmarkPackA(b *testing.B) { + a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} + + buf := make([]byte, a.len()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackRR(a, buf, 0, nil, false) + } +} + +func BenchmarkUnpackA(b *testing.B) { + a := &A{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, A: net.IPv4(127, 0, 0, 1)} + + buf := make([]byte, a.len()) + PackRR(a, buf, 0, nil, false) + a = nil + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackRR(buf, 0) + } +} + +func BenchmarkPackMX(b *testing.B) { + m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} + + buf := make([]byte, m.len()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackRR(m, buf, 0, nil, false) + } +} + +func BenchmarkUnpackMX(b *testing.B) { + m := &MX{Hdr: RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY}, Mx: "mx.miek.nl."} + + buf := make([]byte, m.len()) + PackRR(m, buf, 0, nil, false) + m = nil + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackRR(buf, 0) + } +} + +func BenchmarkPackAAAAA(b *testing.B) { + aaaa, _ := NewRR(". IN A ::1") + + buf := make([]byte, aaaa.len()) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = PackRR(aaaa, buf, 0, nil, false) + } +} + +func BenchmarkUnpackAAAA(b *testing.B) { + aaaa, _ := NewRR(". IN A ::1") + + buf := make([]byte, aaaa.len()) + PackRR(aaaa, buf, 0, nil, false) + aaaa = nil + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _, _ = UnpackRR(buf, 0) + } +} + +func BenchmarkPackMsg(b *testing.B) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + buf := make([]byte, 512) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, _ = msg.PackBuffer(buf) + } +} + +func BenchmarkUnpackMsg(b *testing.B) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + name1 := "12345678901234567890123456789012345.12345678.123." 
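+	// The long owner name, repeated in the MX rdata, exercises
+	// name-compression pointers during unpacking.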
+ rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + msg := makeMsg(name1, []RR{rrMx, rrMx}, nil, nil) + msgBuf, _ := msg.Pack() + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = msg.Unpack(msgBuf) + } +} + +func BenchmarkIdGeneration(b *testing.B) { + for i := 0; i < b.N; i++ { + _ = id() + } +} diff --git a/vendor/github.com/miekg/dns/dns_test.go b/vendor/github.com/miekg/dns/dns_test.go new file mode 100644 index 0000000000..34007aa86d --- /dev/null +++ b/vendor/github.com/miekg/dns/dns_test.go @@ -0,0 +1,452 @@ +package dns + +import ( + "encoding/hex" + "net" + "testing" +) + +func TestPackUnpack(t *testing.T) { + out := new(Msg) + out.Answer = make([]RR, 1) + key := new(DNSKEY) + key = &DNSKEY{Flags: 257, Protocol: 3, Algorithm: RSASHA1} + key.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 3600} + key.PublicKey = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" + + out.Answer[0] = key + msg, err := out.Pack() + if err != nil { + t.Error("failed to pack msg with DNSKEY") + } + in := new(Msg) + if in.Unpack(msg) != nil { + t.Error("failed to unpack msg with DNSKEY") + } + + sig := new(RRSIG) + sig = &RRSIG{TypeCovered: TypeDNSKEY, Algorithm: RSASHA1, Labels: 2, + OrigTtl: 3600, Expiration: 4000, Inception: 4000, KeyTag: 34641, SignerName: "miek.nl.", + Signature: "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ"} + sig.Hdr = RR_Header{Name: "miek.nl.", Rrtype: TypeRRSIG, Class: ClassINET, Ttl: 3600} + + out.Answer[0] = sig + msg, err = out.Pack() + if err != nil { + t.Error("failed to pack msg with RRSIG") + } + + if in.Unpack(msg) != nil { + t.Error("failed to unpack msg with RRSIG") + } +} + +func TestPackUnpack2(t *testing.T) { + m := new(Msg) + m.Extra = make([]RR, 1) + m.Answer = make([]RR, 1) + dom := "miek.nl." + rr := new(A) + rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} + rr.A = net.IPv4(127, 0, 0, 1) + + x := new(TXT) + x.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + x.Txt = []string{"heelalaollo"} + + m.Extra[0] = x + m.Answer[0] = rr + _, err := m.Pack() + if err != nil { + t.Error("Packing failed: ", err) + return + } +} + +func TestPackUnpack3(t *testing.T) { + m := new(Msg) + m.Extra = make([]RR, 2) + m.Answer = make([]RR, 1) + dom := "miek.nl." 
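+	// One empty and one non-empty TXT cover zero-length rdata packing.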
+ rr := new(A) + rr.Hdr = RR_Header{Name: dom, Rrtype: TypeA, Class: ClassINET, Ttl: 0} + rr.A = net.IPv4(127, 0, 0, 1) + + x1 := new(TXT) + x1.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + x1.Txt = []string{} + + x2 := new(TXT) + x2.Hdr = RR_Header{Name: dom, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + x2.Txt = []string{"heelalaollo"} + + m.Extra[0] = x1 + m.Extra[1] = x2 + m.Answer[0] = rr + b, err := m.Pack() + if err != nil { + t.Error("packing failed: ", err) + return + } + + var unpackMsg Msg + err = unpackMsg.Unpack(b) + if err != nil { + t.Error("unpacking failed") + return + } +} + +func TestBailiwick(t *testing.T) { + yes := map[string]string{ + "miek1.nl": "miek1.nl", + "miek.nl": "ns.miek.nl", + ".": "miek.nl", + } + for parent, child := range yes { + if !IsSubDomain(parent, child) { + t.Errorf("%s should be child of %s", child, parent) + t.Errorf("comparelabels %d", CompareDomainName(parent, child)) + t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) + } + } + no := map[string]string{ + "www.miek.nl": "ns.miek.nl", + "m\\.iek.nl": "ns.miek.nl", + "w\\.iek.nl": "w.iek.nl", + "p\\\\.iek.nl": "ns.p.iek.nl", // p\\.iek.nl , literal \ in domain name + "miek.nl": ".", + } + for parent, child := range no { + if IsSubDomain(parent, child) { + t.Errorf("%s should not be child of %s", child, parent) + t.Errorf("comparelabels %d", CompareDomainName(parent, child)) + t.Errorf("lenlabels %d %d", CountLabel(parent), CountLabel(child)) + } + } +} + +func TestPack(t *testing.T) { + rr := []string{"US. 86400 IN NSEC 0-.us. NS SOA RRSIG NSEC DNSKEY TYPE65534"} + m := new(Msg) + var err error + m.Answer = make([]RR, 1) + for _, r := range rr { + m.Answer[0], err = NewRR(r) + if err != nil { + t.Errorf("failed to create RR: %v", err) + continue + } + if _, err := m.Pack(); err != nil { + t.Errorf("packing failed: %v", err) + } + } + x := new(Msg) + ns, _ := NewRR("pool.ntp.org. 390 IN NS a.ntpns.org") + ns.(*NS).Ns = "a.ntpns.org" + x.Ns = append(m.Ns, ns) + x.Ns = append(m.Ns, ns) + x.Ns = append(m.Ns, ns) + // This crashes due to the fact the a.ntpns.org isn't a FQDN + // How to recover() from a remove panic()? + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } + x.Answer = make([]RR, 1) + x.Answer[0], err = NewRR(rr[0]) + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } + x.Question = make([]Question, 1) + x.Question[0] = Question{";sd#edddds鍛↙赏‘℅∥↙xzztsestxssweewwsssstx@s@Z嵌e@cn.pool.ntp.org.", TypeA, ClassINET} + if _, err := x.Pack(); err == nil { + t.Error("packing should fail") + } +} + +func TestPackNAPTR(t *testing.T) { + for _, n := range []string{ + `apple.com. IN NAPTR 100 50 "se" "SIP+D2U" "" _sip._udp.apple.com.`, + `apple.com. IN NAPTR 90 50 "se" "SIP+D2T" "" _sip._tcp.apple.com.`, + `apple.com. IN NAPTR 50 50 "se" "SIPS+D2T" "" _sips._tcp.apple.com.`, + } { + rr, _ := NewRR(n) + msg := make([]byte, rr.len()) + if off, err := PackRR(rr, msg, 0, nil, false); err != nil { + t.Errorf("packing failed: %v", err) + t.Errorf("length %d, need more than %d", rr.len(), off) + } else { + t.Logf("buf size needed: %d", off) + } + } +} + +func TestCompressLength(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl", TypeMX) + ul := m.Len() + m.Compress = true + if ul != m.Len() { + t.Fatalf("should be equal") + } +} + +// Does the predicted length match final packed length? 
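+// Msg.Len() may over-estimate, but it must never return less than the size
+// of the actually packed message.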
+func TestMsgCompressLength(t *testing.T) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + msg.Compress = true + return msg + } + + name1 := "12345678901234567890123456789012345.12345678.123." + rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + tests := []*Msg{ + makeMsg(name1, []RR{rrA}, nil, nil), + makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} + + for _, msg := range tests { + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted < len(buf) { + t.Errorf("predicted compressed length is wrong: predicted %s (len=%d) %d, actual %d", + msg.Question[0].Name, len(msg.Answer), predicted, len(buf)) + } + } +} + +func TestMsgLength(t *testing.T) { + makeMsg := func(question string, ans, ns, e []RR) *Msg { + msg := new(Msg) + msg.SetQuestion(Fqdn(question), TypeANY) + msg.Answer = append(msg.Answer, ans...) + msg.Ns = append(msg.Ns, ns...) + msg.Extra = append(msg.Extra, e...) + return msg + } + + name1 := "12345678901234567890123456789012345.12345678.123." + rrA, _ := NewRR(name1 + " 3600 IN A 192.0.2.1") + rrMx, _ := NewRR(name1 + " 3600 IN MX 10 " + name1) + tests := []*Msg{ + makeMsg(name1, []RR{rrA}, nil, nil), + makeMsg(name1, []RR{rrMx, rrMx}, nil, nil)} + + for _, msg := range tests { + predicted := msg.Len() + buf, err := msg.Pack() + if err != nil { + t.Error(err) + } + if predicted < len(buf) { + t.Errorf("predicted length is wrong: predicted %s (len=%d), actual %d", + msg.Question[0].Name, predicted, len(buf)) + } + } +} + +func TestMsgLength2(t *testing.T) { + // Serialized replies + var testMessages = []string{ + // google.com. IN A? + "064e81800001000b0004000506676f6f676c6503636f6d0000010001c00c00010001000000050004adc22986c00c00010001000000050004adc22987c00c00010001000000050004adc22988c00c00010001000000050004adc22989c00c00010001000000050004adc2298ec00c00010001000000050004adc22980c00c00010001000000050004adc22981c00c00010001000000050004adc22982c00c00010001000000050004adc22983c00c00010001000000050004adc22984c00c00010001000000050004adc22985c00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc0d800010001000000050004d8ef200ac0ea00010001000000050004d8ef220ac0fc00010001000000050004d8ef240ac10e00010001000000050004d8ef260a0000290500000000050000", + // amazon.com. IN A? 
(reply has no EDNS0 record) + // TODO(miek): this one is off-by-one, need to find out why + //"6de1818000010004000a000806616d617a6f6e03636f6d0000010001c00c000100010000000500044815c2d4c00c000100010000000500044815d7e8c00c00010001000000050004b02062a6c00c00010001000000050004cdfbf236c00c000200010000000500140570646e733408756c747261646e73036f726700c00c000200010000000500150570646e733508756c747261646e7304696e666f00c00c000200010000000500160570646e733608756c747261646e7302636f02756b00c00c00020001000000050014036e7331037033310664796e656374036e657400c00c00020001000000050006036e7332c0cfc00c00020001000000050006036e7333c0cfc00c00020001000000050006036e7334c0cfc00c000200010000000500110570646e733108756c747261646e73c0dac00c000200010000000500080570646e7332c127c00c000200010000000500080570646e7333c06ec0cb00010001000000050004d04e461fc0eb00010001000000050004cc0dfa1fc0fd00010001000000050004d04e471fc10f00010001000000050004cc0dfb1fc12100010001000000050004cc4a6c01c121001c000100000005001020010502f3ff00000000000000000001c13e00010001000000050004cc4a6d01c13e001c0001000000050010261000a1101400000000000000000001", + // yahoo.com. IN A? + "fc2d81800001000300070008057961686f6f03636f6d0000010001c00c00010001000000050004628afd6dc00c00010001000000050004628bb718c00c00010001000000050004cebe242dc00c00020001000000050006036e7336c00cc00c00020001000000050006036e7338c00cc00c00020001000000050006036e7331c00cc00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7335c00cc07b0001000100000005000444b48310c08d00010001000000050004448eff10c09f00010001000000050004cb54dd35c0b100010001000000050004628a0b9dc0c30001000100000005000477a0f77cc05700010001000000050004ca2bdfaac06900010001000000050004caa568160000290500000000050000", + // microsoft.com. IN A? + "f4368180000100020005000b096d6963726f736f667403636f6d0000010001c00c0001000100000005000440040b25c00c0001000100000005000441373ac9c00c0002000100000005000e036e7331046d736674036e657400c00c00020001000000050006036e7332c04fc00c00020001000000050006036e7333c04fc00c00020001000000050006036e7334c04fc00c00020001000000050006036e7335c04fc04b000100010000000500044137253ec04b001c00010000000500102a010111200500000000000000010001c0650001000100000005000440043badc065001c00010000000500102a010111200600060000000000010001c07700010001000000050004d5c7b435c077001c00010000000500102a010111202000000000000000010001c08900010001000000050004cf2e4bfec089001c00010000000500102404f800200300000000000000010001c09b000100010000000500044137e28cc09b001c00010000000500102a010111200f000100000000000100010000290500000000050000", + // google.com. IN MX? + "724b8180000100050004000b06676f6f676c6503636f6d00000f0001c00c000f000100000005000c000a056173706d78016cc00cc00c000f0001000000050009001404616c7431c02ac00c000f0001000000050009001e04616c7432c02ac00c000f0001000000050009002804616c7433c02ac00c000f0001000000050009003204616c7434c02ac00c00020001000000050006036e7332c00cc00c00020001000000050006036e7333c00cc00c00020001000000050006036e7334c00cc00c00020001000000050006036e7331c00cc02a00010001000000050004adc2421bc02a001c00010000000500102a00145040080c01000000000000001bc04200010001000000050004adc2461bc05700010001000000050004adc2451bc06c000100010000000500044a7d8f1bc081000100010000000500044a7d191bc0ca00010001000000050004d8ef200ac09400010001000000050004d8ef220ac0a600010001000000050004d8ef240ac0b800010001000000050004d8ef260a0000290500000000050000", + // reddit.com. IN A? 
+ "12b98180000100080000000c0672656464697403636f6d0000020001c00c0002000100000005000f046175733204616b616d036e657400c00c000200010000000500070475736534c02dc00c000200010000000500070475737733c02dc00c000200010000000500070475737735c02dc00c00020001000000050008056173696131c02dc00c00020001000000050008056173696139c02dc00c00020001000000050008056e73312d31c02dc00c0002000100000005000a076e73312d313935c02dc02800010001000000050004c30a242ec04300010001000000050004451f1d39c05600010001000000050004451f3bc7c0690001000100000005000460073240c07c000100010000000500046007fb81c090000100010000000500047c283484c090001c00010000000500102a0226f0006700000000000000000064c0a400010001000000050004c16c5b01c0a4001c000100000005001026001401000200000000000000000001c0b800010001000000050004c16c5bc3c0b8001c0001000000050010260014010002000000000000000000c30000290500000000050000", + } + + for i, hexData := range testMessages { + // we won't fail the decoding of the hex + input, _ := hex.DecodeString(hexData) + + m := new(Msg) + m.Unpack(input) + m.Compress = true + lenComp := m.Len() + b, _ := m.Pack() + pacComp := len(b) + m.Compress = false + lenUnComp := m.Len() + b, _ = m.Pack() + pacUnComp := len(b) + if pacComp+1 != lenComp { + t.Errorf("msg.Len(compressed)=%d actual=%d for test %d", lenComp, pacComp, i) + } + if pacUnComp+1 != lenUnComp { + t.Errorf("msg.Len(uncompressed)=%d actual=%d for test %d", lenUnComp, pacUnComp, i) + } + } +} + +func TestMsgLengthCompressionMalformed(t *testing.T) { + // SOA with empty hostmaster, which is illegal + soa := &SOA{Hdr: RR_Header{Name: ".", Rrtype: TypeSOA, Class: ClassINET, Ttl: 12345}, + Ns: ".", + Mbox: "", + Serial: 0, + Refresh: 28800, + Retry: 7200, + Expire: 604800, + Minttl: 60} + m := new(Msg) + m.Compress = true + m.Ns = []RR{soa} + m.Len() // Should not crash. +} + +func TestToRFC3597(t *testing.T) { + a, _ := NewRR("miek.nl. IN A 10.0.1.1") + x := new(RFC3597) + x.ToRFC3597(a) + if x.String() != `miek.nl. 3600 CLASS1 TYPE1 \# 4 0a000101` { + t.Error("string mismatch") + } +} + +func TestNoRdataPack(t *testing.T) { + data := make([]byte, 1024) + for typ, fn := range TypeToRR { + r := fn() + *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} + _, err := PackRR(r, data, 0, nil, false) + if err != nil { + t.Errorf("failed to pack RR with zero rdata: %s: %v", TypeToString[typ], err) + } + } +} + +func TestNoRdataUnpack(t *testing.T) { + data := make([]byte, 1024) + for typ, fn := range TypeToRR { + if typ == TypeSOA || typ == TypeTSIG { + // SOA, TSIG will not be seen (like this) in dyn. updates? + continue + } + r := fn() + *r.Header() = RR_Header{Name: "miek.nl.", Rrtype: typ, Class: ClassINET, Ttl: 16} + off, err := PackRR(r, data, 0, nil, false) + if err != nil { + // Should always works, TestNoDataPack should have caught this + t.Errorf("failed to pack RR: %v", err) + continue + } + rr, _, err := UnpackRR(data[:off], 0) + if err != nil { + t.Errorf("failed to unpack RR with zero rdata: %s: %v", TypeToString[typ], err) + } + t.Log(rr) + } +} + +func TestRdataOverflow(t *testing.T) { + rr := new(RFC3597) + rr.Hdr.Name = "." 
+ rr.Hdr.Class = ClassINET + rr.Hdr.Rrtype = 65280 + rr.Rdata = hex.EncodeToString(make([]byte, 0xFFFF)) + buf := make([]byte, 0xFFFF*2) + if _, err := PackRR(rr, buf, 0, nil, false); err != nil { + t.Fatalf("maximum size rrdata pack failed: %v", err) + } + rr.Rdata += "00" + if _, err := PackRR(rr, buf, 0, nil, false); err != ErrRdata { + t.Fatalf("oversize rrdata pack didn't return ErrRdata - instead: %v", err) + } +} + +func TestCopy(t *testing.T) { + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") // Weird TTL to avoid catching TTL + rr1 := Copy(rr) + if rr.String() != rr1.String() { + t.Fatalf("Copy() failed %s != %s", rr.String(), rr1.String()) + } +} + +func TestMsgCopy(t *testing.T) { + m := new(Msg) + m.SetQuestion("miek.nl.", TypeA) + rr, _ := NewRR("miek.nl. 2311 IN A 127.0.0.1") + m.Answer = []RR{rr} + rr, _ = NewRR("miek.nl. 2311 IN NS 127.0.0.1") + m.Ns = []RR{rr} + + m1 := m.Copy() + if m.String() != m1.String() { + t.Fatalf("Msg.Copy() failed %s != %s", m.String(), m1.String()) + } + + m1.Answer[0], _ = NewRR("somethingelse.nl. 2311 IN A 127.0.0.1") + if m.String() == m1.String() { + t.Fatalf("Msg.Copy() failed; change to copy changed template %s", m.String()) + } + + rr, _ = NewRR("miek.nl. 2311 IN A 127.0.0.2") + m1.Answer = append(m1.Answer, rr) + if m1.Ns[0].String() == m1.Answer[1].String() { + t.Fatalf("Msg.Copy() failed; append changed underlying array %s", m1.Ns[0].String()) + } +} + +func TestPackIPSECKEY(t *testing.T) { + tests := []string{ + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.1.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0 7200 IN IPSECKEY ( 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + } + buf := make([]byte, 1024) + for _, t1 := range tests { + rr, _ := NewRR(t1) + off, err := PackRR(rr, buf, 0, nil, false) + if err != nil { + t.Errorf("failed to pack IPSECKEY %v: %s", err, t1) + continue + } + + rr, _, err = UnpackRR(buf[:off], 0) + if err != nil { + t.Errorf("failed to unpack IPSECKEY %v: %s", err, t1) + } + t.Log(rr) + } +} + +func TestMsgPackBuffer(t *testing.T) { + var testMessages = []string{ + // news.ycombinator.com.in.escapemg.com. IN A, response + "586285830001000000010000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001c0210006000100000e10002c036e7332c02103646e730b67726f6f7665736861726bc02d77ed50e600002a3000000e1000093a8000000e10", + + // news.ycombinator.com.in.escapemg.com. 
IN A, question + "586201000001000000000000046e6577730b79636f6d62696e61746f7203636f6d02696e086573636170656d6703636f6d0000010001", + + "398781020001000000000000046e6577730b79636f6d62696e61746f7203636f6d0000010001", + } + + for i, hexData := range testMessages { + // we won't fail the decoding of the hex + input, _ := hex.DecodeString(hexData) + m := new(Msg) + if err := m.Unpack(input); err != nil { + t.Errorf("packet %d failed to unpack", i) + continue + } + t.Logf("packet %d %s", i, m.String()) + } +} diff --git a/vendor/github.com/miekg/dns/dnssec_test.go b/vendor/github.com/miekg/dns/dnssec_test.go new file mode 100644 index 0000000000..ca085ed3b8 --- /dev/null +++ b/vendor/github.com/miekg/dns/dnssec_test.go @@ -0,0 +1,733 @@ +package dns + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "reflect" + "strings" + "testing" + "time" +) + +func getKey() *DNSKEY { + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + return key +} + +func getSoa() *SOA { + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." + soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + return soa +} + +func TestGenerateEC(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = ECDSAP256SHA256 + privkey, _ := key.Generate(256) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestGenerateDSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = DSA + privkey, _ := key.Generate(1024) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestGenerateRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(1024) + t.Log(key.String()) + t.Log(key.PrivateKeyString(privkey)) +} + +func TestSecure(t *testing.T) { + soa := getSoa() + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = TypeSOA + sig.Algorithm = RSASHA256 + sig.Labels = 2 + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.OrigTtl = 14400 + sig.KeyTag = 12051 + sig.SignerName = "miek.nl." + sig.Signature = "oMCbslaAVIp/8kVtLSms3tDABpcPRUgHLrOR48OOplkYo+8TeEGWwkSwaz/MRo2fB4FxW0qj/hTlIjUGuACSd+b1wKdH5GvzRJc2pFmxtCbm55ygAh4EUL0F6U5cKtGJGSXxxg6UFCQ0doJCmiGFa78LolaUOXImJrk6AFrGa0M=" + + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." 
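+	// Public half of the key that produced the RRSIG above; Verify checks
+	// only the signature, the validity period is tested separately.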
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + + // It should validate. Period is checked separately, so this will keep on working + if sig.Verify(key, []RR{soa}) != nil { + t.Error("failure to validate") + } +} + +func TestSignature(t *testing.T) { + sig := new(RRSIG) + sig.Hdr.Name = "miek.nl." + sig.Hdr.Class = ClassINET + sig.Hdr.Ttl = 3600 + sig.TypeCovered = TypeDNSKEY + sig.Algorithm = RSASHA1 + sig.Labels = 2 + sig.OrigTtl = 4000 + sig.Expiration = 1000 //Thu Jan 1 02:06:40 CET 1970 + sig.Inception = 800 //Thu Jan 1 01:13:20 CET 1970 + sig.KeyTag = 34641 + sig.SignerName = "miek.nl." + sig.Signature = "AwEAAaHIwpx3w4VHKi6i1LHnTaWeHCL154Jug0Rtc9ji5qwPXpBo6A5sRv7cSsPQKPIwxLpyCrbJ4mr2L0EPOdvP6z6YfljK2ZmTbogU9aSU2fiq/4wjxbdkLyoDVgtO+JsxNN4bjr4WcWhsmk1Hg93FV9ZpkWb0Tbad8DFqNDzr//kZ" + + // Should not be valid + if sig.ValidityPeriod(time.Now()) { + t.Error("should not be valid") + } + + sig.Inception = 315565800 //Tue Jan 1 10:10:00 CET 1980 + sig.Expiration = 4102477800 //Fri Jan 1 10:10:00 CET 2100 + if !sig.ValidityPeriod(time.Now()) { + t.Error("should be valid") + } +} + +func TestSignVerify(t *testing.T) { + // The record we want to sign + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." + soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + + soa1 := new(SOA) + soa1.Hdr = RR_Header{"*.miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa1.Ns = "open.nlnetlabs.nl." + soa1.Mbox = "miekg.atoom.net." + soa1.Serial = 1293945905 + soa1.Refresh = 14400 + soa1.Retry = 3600 + soa1.Expire = 604800 + soa1.Minttl = 86400 + + srv := new(SRV) + srv.Hdr = RR_Header{"srv.miek.nl.", TypeSRV, ClassINET, 14400, 0} + srv.Port = 1000 + srv.Weight = 800 + srv.Target = "web1.miek.nl." + + hinfo := &HINFO{ + Hdr: RR_Header{ + Name: "miek.nl.", + Rrtype: TypeHINFO, + Class: ClassINET, + Ttl: 3789, + }, + Cpu: "X", + Os: "Y", + } + + // With this key + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(512) + + // Fill in the values of the Sig, before signing + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = soa.Hdr.Rrtype + sig.Labels = uint8(CountLabel(soa.Hdr.Name)) // works for all 3 + sig.OrigTtl = soa.Hdr.Ttl + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = key.KeyTag() // Get the keyfrom the Key + sig.SignerName = key.Hdr.Name + sig.Algorithm = RSASHA256 + + for _, r := range []RR{soa, soa1, srv, hinfo} { + if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{r}); err != nil { + t.Error("failure to sign the record:", err) + continue + } + if err := sig.Verify(key, []RR{r}); err != nil { + t.Error("failure to validate") + continue + } + t.Logf("validated: %s", r.Header().Name) + } +} + +func Test65534(t *testing.T) { + t6 := new(RFC3597) + t6.Hdr = RR_Header{"miek.nl.", 65534, ClassINET, 14400, 0} + t6.Rdata = "505D870001" + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." 
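+	// Signing an RFC 3597 unknown-type record (TYPE65534) must work just
+	// like signing a known type.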
+ key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + privkey, _ := key.Generate(1024) + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = t6.Hdr.Rrtype + sig.Labels = uint8(CountLabel(t6.Hdr.Name)) + sig.OrigTtl = t6.Hdr.Ttl + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = key.KeyTag() + sig.SignerName = key.Hdr.Name + sig.Algorithm = RSASHA256 + if err := sig.Sign(privkey.(*rsa.PrivateKey), []RR{t6}); err != nil { + t.Error(err) + t.Error("failure to sign the TYPE65534 record") + } + if err := sig.Verify(key, []RR{t6}); err != nil { + t.Error(err) + t.Error("failure to validate") + } else { + t.Logf("validated: %s", t6.Header().Name) + } +} + +func TestDnskey(t *testing.T) { + pubkey, err := ReadRR(strings.NewReader(` +miek.nl. IN DNSKEY 256 3 10 AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL ;{id = 5240 (zsk), size = 1024b} +`), "Kmiek.nl.+010+05240.key") + if err != nil { + t.Fatal(err) + } + privStr := `Private-key-format: v1.3 +Algorithm: 10 (RSASHA512) +Modulus: m4wK7YV26AeROtdiCXmqLG9wPDVoMOW8vjr/EkpscEAdjXp81RvZvrlzCSjYmz9onFRgltmTl3AINnFh+t9tlW0M9C5zejxBoKFXELv8ljPYAdz2oe+pDWPhWsfvVFYg2VCjpViPM38EakyE5mhk4TDOnUd+w4TeU1hyhZTWyYs= +PublicExponent: AQAB +PrivateExponent: UfCoIQ/Z38l8vB6SSqOI/feGjHEl/fxIPX4euKf0D/32k30fHbSaNFrFOuIFmWMB3LimWVEs6u3dpbB9CQeCVg7hwU5puG7OtuiZJgDAhNeOnxvo5btp4XzPZrJSxR4WNQnwIiYWbl0aFlL1VGgHC/3By89ENZyWaZcMLW4KGWE= +Prime1: yxwC6ogAu8aVcDx2wg1V0b5M5P6jP8qkRFVMxWNTw60Vkn+ECvw6YAZZBHZPaMyRYZLzPgUlyYRd0cjupy4+fQ== +Prime2: xA1bF8M0RTIQ6+A11AoVG6GIR/aPGg5sogRkIZ7ID/sF6g9HMVU/CM2TqVEBJLRPp73cv6ZeC3bcqOCqZhz+pw== +Exponent1: xzkblyZ96bGYxTVZm2/vHMOXswod4KWIyMoOepK6B/ZPcZoIT6omLCgtypWtwHLfqyCz3MK51Nc0G2EGzg8rFQ== +Exponent2: Pu5+mCEb7T5F+kFNZhQadHUklt0JUHbi3hsEvVoHpEGSw3BGDQrtIflDde0/rbWHgDPM4WQY+hscd8UuTXrvLw== +Coefficient: UuRoNqe7YHnKmQzE6iDWKTMIWTuoqqrFAmXPmKQnC+Y+BQzOVEHUo9bXdDnoI9hzXP1gf8zENMYwYLeWpuYlFQ== +` + privkey, err := pubkey.(*DNSKEY).ReadPrivateKey(strings.NewReader(privStr), + "Kmiek.nl.+010+05240.private") + if err != nil { + t.Fatal(err) + } + if pubkey.(*DNSKEY).PublicKey != "AwEAAZuMCu2FdugHkTrXYgl5qixvcDw1aDDlvL46/xJKbHBAHY16fNUb2b65cwko2Js/aJxUYJbZk5dwCDZxYfrfbZVtDPQuc3o8QaChVxC7/JYz2AHc9qHvqQ1j4VrH71RWINlQo6VYjzN/BGpMhOZoZOEwzp1HfsOE3lNYcoWU1smL" { + t.Error("pubkey is not what we've read") + } + if pubkey.(*DNSKEY).PrivateKeyString(privkey) != privStr { + t.Error("privkey is not what we've read") + t.Errorf("%v", pubkey.(*DNSKEY).PrivateKeyString(privkey)) + } +} + +func TestTag(t *testing.T) { + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 3600 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + + tag := key.KeyTag() + if tag != 12051 { + t.Errorf("wrong key tag: %d for key %v", tag, key) + } +} + +func TestKeyRSA(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." 
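+	// Generating the full 2048-bit RSA key below is what makes this test
+	// too slow for -short mode.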
+ key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 3600 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + priv, _ := key.Generate(2048) + + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." + soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = TypeSOA + sig.Algorithm = RSASHA256 + sig.Labels = 2 + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.OrigTtl = soa.Hdr.Ttl + sig.KeyTag = key.KeyTag() + sig.SignerName = key.Hdr.Name + + if err := sig.Sign(priv.(*rsa.PrivateKey), []RR{soa}); err != nil { + t.Error("failed to sign") + return + } + if err := sig.Verify(key, []RR{soa}); err != nil { + t.Error("failed to verify") + } +} + +func TestKeyToDS(t *testing.T) { + key := new(DNSKEY) + key.Hdr.Name = "miek.nl." + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Class = ClassINET + key.Hdr.Ttl = 3600 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = RSASHA256 + key.PublicKey = "AwEAAcNEU67LJI5GEgF9QLNqLO1SMq1EdoQ6E9f85ha0k0ewQGCblyW2836GiVsm6k8Kr5ECIoMJ6fZWf3CQSQ9ycWfTyOHfmI3eQ/1Covhb2y4bAmL/07PhrL7ozWBW3wBfM335Ft9xjtXHPy7ztCbV9qZ4TVDTW/Iyg0PiwgoXVesz" + + ds := key.ToDS(SHA1) + if strings.ToUpper(ds.Digest) != "B5121BDB5B8D86D0CC5FFAFBAAABE26C3E20BAC1" { + t.Errorf("wrong DS digest for SHA1\n%v", ds) + } +} + +func TestSignRSA(t *testing.T) { + pub := "miek.nl. IN DNSKEY 256 3 5 AwEAAb+8lGNCxJgLS8rYVer6EnHVuIkQDghdjdtewDzU3G5R7PbMbKVRvH2Ma7pQyYceoaqWZQirSj72euPWfPxQnMy9ucCylA+FuH9cSjIcPf4PqJfdupHk9X6EBYjxrCLY4p1/yBwgyBIRJtZtAqM3ceAH2WovEJD6rTtOuHo5AluJ" + + priv := `Private-key-format: v1.3 +Algorithm: 5 (RSASHA1) +Modulus: v7yUY0LEmAtLythV6voScdW4iRAOCF2N217APNTcblHs9sxspVG8fYxrulDJhx6hqpZlCKtKPvZ649Z8/FCczL25wLKUD4W4f1xKMhw9/g+ol926keT1foQFiPGsItjinX/IHCDIEhEm1m0Cozdx4AfZai8QkPqtO064ejkCW4k= +PublicExponent: AQAB +PrivateExponent: YPwEmwjk5HuiROKU4xzHQ6l1hG8Iiha4cKRG3P5W2b66/EN/GUh07ZSf0UiYB67o257jUDVEgwCuPJz776zfApcCB4oGV+YDyEu7Hp/rL8KcSN0la0k2r9scKwxTp4BTJT23zyBFXsV/1wRDK1A5NxsHPDMYi2SoK63Enm/1ptk= +Prime1: /wjOG+fD0ybNoSRn7nQ79udGeR1b0YhUA5mNjDx/x2fxtIXzygYk0Rhx9QFfDy6LOBvz92gbNQlzCLz3DJt5hw== +Prime2: wHZsJ8OGhkp5p3mrJFZXMDc2mbYusDVTA+t+iRPdS797Tj0pjvU2HN4vTnTj8KBQp6hmnY7dLp9Y1qserySGbw== +Exponent1: N0A7FsSRIg+IAN8YPQqlawoTtG1t1OkJ+nWrurPootScApX6iMvn8fyvw3p2k51rv84efnzpWAYiC8SUaQDNxQ== +Exponent2: SvuYRaGyvo0zemE3oS+WRm2scxR8eiA8WJGeOc+obwOKCcBgeZblXzfdHGcEC1KaOcetOwNW/vwMA46lpLzJNw== +Coefficient: 8+7ZN/JgByqv0NfULiFKTjtyegUcijRuyij7yNxYbCBneDvZGxJwKNi4YYXWx743pcAj4Oi4Oh86gcmxLs+hGw== +Created: 20110302104537 +Publish: 20110302104537 +Activate: 20110302104537` + + xk, _ := NewRR(pub) + k := xk.(*DNSKEY) + p, err := k.NewPrivateKey(priv) + if err != nil { + t.Error(err) + } + switch priv := p.(type) { + case *rsa.PrivateKey: + if 65537 != priv.PublicKey.E { + t.Error("exponenent should be 65537") + } + default: + t.Errorf("we should have read an RSA key: %v", priv) + } + if k.KeyTag() != 37350 { + t.Errorf("keytag should be 37350, got %d %v", k.KeyTag(), k) + } + + soa := new(SOA) + soa.Hdr = RR_Header{"miek.nl.", TypeSOA, ClassINET, 14400, 0} + soa.Ns = "open.nlnetlabs.nl." + soa.Mbox = "miekg.atoom.net." 
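+	// RSASHA1 signing is deterministic, so with a fixed key and fixed
+	// timestamps the signature can be compared to a known-good string below.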
+ soa.Serial = 1293945905 + soa.Refresh = 14400 + soa.Retry = 3600 + soa.Expire = 604800 + soa.Minttl = 86400 + + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = k.KeyTag() + sig.SignerName = k.Hdr.Name + sig.Algorithm = k.Algorithm + + sig.Sign(p.(*rsa.PrivateKey), []RR{soa}) + if sig.Signature != "D5zsobpQcmMmYsUMLxCVEtgAdCvTu8V/IEeP4EyLBjqPJmjt96bwM9kqihsccofA5LIJ7DN91qkCORjWSTwNhzCv7bMyr2o5vBZElrlpnRzlvsFIoAZCD9xg6ZY7ZyzUJmU6IcTwG4v3xEYajcpbJJiyaw/RqR90MuRdKPiBzSo=" { + t.Errorf("signature is not correct: %v", sig) + } +} + +func TestSignVerifyECDSA(t *testing.T) { + pub := `example.net. 3600 IN DNSKEY 257 3 14 ( + xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 + w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 + /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` + priv := `Private-key-format: v1.2 +Algorithm: 14 (ECDSAP384SHA384) +PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` + + eckey, err := NewRR(pub) + if err != nil { + t.Fatal(err) + } + privkey, err := eckey.(*DNSKEY).NewPrivateKey(priv) + if err != nil { + t.Fatal(err) + } + // TODO: Create separate test for this + ds := eckey.(*DNSKEY).ToDS(SHA384) + if ds.KeyTag != 10771 { + t.Fatal("wrong keytag on DS") + } + if ds.Digest != "72d7b62976ce06438e9c0bf319013cf801f09ecc84b8d7e9495f27e305c6a9b0563a9b5f4d288405c3008a946df983d6" { + t.Fatal("wrong DS Digest") + } + a, _ := NewRR("www.example.net. 3600 IN A 192.0.2.1") + sig := new(RRSIG) + sig.Hdr = RR_Header{"example.net.", TypeRRSIG, ClassINET, 14400, 0} + sig.Expiration, _ = StringToTime("20100909102025") + sig.Inception, _ = StringToTime("20100812102025") + sig.KeyTag = eckey.(*DNSKEY).KeyTag() + sig.SignerName = eckey.(*DNSKEY).Hdr.Name + sig.Algorithm = eckey.(*DNSKEY).Algorithm + + if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{a}) != nil { + t.Fatal("failure to sign the record") + } + + if err := sig.Verify(eckey.(*DNSKEY), []RR{a}); err != nil { + t.Fatalf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", + eckey.(*DNSKEY).String(), + a.String(), + sig.String(), + eckey.(*DNSKEY).PrivateKeyString(privkey), + err, + ) + } +} + +func TestSignVerifyECDSA2(t *testing.T) { + srv1, err := NewRR("srv.miek.nl. IN SRV 1000 800 0 web1.miek.nl.") + if err != nil { + t.Fatal(err) + } + srv := srv1.(*SRV) + + // With this key + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." 
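+	// Unlike the RSA tests, ECDSA signatures are randomized, so the result
+	// can only be verified, never compared against a fixed value.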
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = ECDSAP256SHA256 + privkey, err := key.Generate(256) + if err != nil { + t.Fatal("failure to generate key") + } + + // Fill in the values of the Sig, before signing + sig := new(RRSIG) + sig.Hdr = RR_Header{"miek.nl.", TypeRRSIG, ClassINET, 14400, 0} + sig.TypeCovered = srv.Hdr.Rrtype + sig.Labels = uint8(CountLabel(srv.Hdr.Name)) // works for all 3 + sig.OrigTtl = srv.Hdr.Ttl + sig.Expiration = 1296534305 // date -u '+%s' -d"2011-02-01 04:25:05" + sig.Inception = 1293942305 // date -u '+%s' -d"2011-01-02 04:25:05" + sig.KeyTag = key.KeyTag() // Get the keyfrom the Key + sig.SignerName = key.Hdr.Name + sig.Algorithm = ECDSAP256SHA256 + + if sig.Sign(privkey.(*ecdsa.PrivateKey), []RR{srv}) != nil { + t.Fatal("failure to sign the record") + } + + err = sig.Verify(key, []RR{srv}) + if err != nil { + t.Logf("failure to validate:\n%s\n%s\n%s\n\n%s\n\n%v", + key.String(), + srv.String(), + sig.String(), + key.PrivateKeyString(privkey), + err, + ) + } +} + +// Here the test vectors from the relevant RFCs are checked. +// rfc6605 6.1 +func TestRFC6605P256(t *testing.T) { + exDNSKEY := `example.net. 3600 IN DNSKEY 257 3 13 ( + GojIhhXUN/u4v54ZQqGSnyhWJwaubCvTmeexv7bR6edb + krSqQpF64cYbcB7wNcP+e+MAnLr+Wi9xMWyQLc8NAA== )` + exPriv := `Private-key-format: v1.2 +Algorithm: 13 (ECDSAP256SHA256) +PrivateKey: GU6SnQ/Ou+xC5RumuIUIuJZteXT2z0O/ok1s38Et6mQ=` + rrDNSKEY, err := NewRR(exDNSKEY) + if err != nil { + t.Fatal(err) + } + priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) + if err != nil { + t.Fatal(err) + } + + exDS := `example.net. 3600 IN DS 55648 13 2 ( + b4c8c1fe2e7477127b27115656ad6256f424625bf5c1 + e2770ce6d6e37df61d17 )` + rrDS, err := NewRR(exDS) + if err != nil { + t.Fatal(err) + } + ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA256) + if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { + t.Errorf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) + } + + exA := `www.example.net. 3600 IN A 192.0.2.1` + exRRSIG := `www.example.net. 3600 IN RRSIG A 13 3 3600 ( + 20100909100439 20100812100439 55648 example.net. + qx6wLYqmh+l9oCKTN6qIc+bw6ya+KJ8oMz0YP107epXA + yGmt+3SNruPFKG7tZoLBLlUzGGus7ZwmwWep666VCw== )` + rrA, err := NewRR(exA) + if err != nil { + t.Fatal(err) + } + rrRRSIG, err := NewRR(exRRSIG) + if err != nil { + t.Fatal(err) + } + if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate the spec RRSIG: %v", err) + } + + ourRRSIG := &RRSIG{ + Hdr: RR_Header{ + Ttl: rrA.Header().Ttl, + }, + KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), + SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, + Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, + } + ourRRSIG.Expiration, _ = StringToTime("20100909100439") + ourRRSIG.Inception, _ = StringToTime("20100812100439") + err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) + if err != nil { + t.Fatal(err) + } + + if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate our RRSIG: %v", err) + } + + // Signatures are randomized + rrRRSIG.(*RRSIG).Signature = "" + ourRRSIG.Signature = "" + if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { + t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) + } +} + +// rfc6605 6.2 +func TestRFC6605P384(t *testing.T) { + exDNSKEY := `example.net. 
3600 IN DNSKEY 257 3 14 ( + xKYaNhWdGOfJ+nPrL8/arkwf2EY3MDJ+SErKivBVSum1 + w/egsXvSADtNJhyem5RCOpgQ6K8X1DRSEkrbYQ+OB+v8 + /uX45NBwY8rp65F6Glur8I/mlVNgF6W/qTI37m40 )` + exPriv := `Private-key-format: v1.2 +Algorithm: 14 (ECDSAP384SHA384) +PrivateKey: WURgWHCcYIYUPWgeLmiPY2DJJk02vgrmTfitxgqcL4vwW7BOrbawVmVe0d9V94SR` + rrDNSKEY, err := NewRR(exDNSKEY) + if err != nil { + t.Fatal(err) + } + priv, err := rrDNSKEY.(*DNSKEY).NewPrivateKey(exPriv) + if err != nil { + t.Fatal(err) + } + + exDS := `example.net. 3600 IN DS 10771 14 4 ( + 72d7b62976ce06438e9c0bf319013cf801f09ecc84b8 + d7e9495f27e305c6a9b0563a9b5f4d288405c3008a94 + 6df983d6 )` + rrDS, err := NewRR(exDS) + if err != nil { + t.Fatal(err) + } + ourDS := rrDNSKEY.(*DNSKEY).ToDS(SHA384) + if !reflect.DeepEqual(ourDS, rrDS.(*DS)) { + t.Fatalf("DS record differs:\n%v\n%v", ourDS, rrDS.(*DS)) + } + + exA := `www.example.net. 3600 IN A 192.0.2.1` + exRRSIG := `www.example.net. 3600 IN RRSIG A 14 3 3600 ( + 20100909102025 20100812102025 10771 example.net. + /L5hDKIvGDyI1fcARX3z65qrmPsVz73QD1Mr5CEqOiLP + 95hxQouuroGCeZOvzFaxsT8Glr74hbavRKayJNuydCuz + WTSSPdz7wnqXL5bdcJzusdnI0RSMROxxwGipWcJm )` + rrA, err := NewRR(exA) + if err != nil { + t.Fatal(err) + } + rrRRSIG, err := NewRR(exRRSIG) + if err != nil { + t.Fatal(err) + } + if err = rrRRSIG.(*RRSIG).Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate the spec RRSIG: %v", err) + } + + ourRRSIG := &RRSIG{ + Hdr: RR_Header{ + Ttl: rrA.Header().Ttl, + }, + KeyTag: rrDNSKEY.(*DNSKEY).KeyTag(), + SignerName: rrDNSKEY.(*DNSKEY).Hdr.Name, + Algorithm: rrDNSKEY.(*DNSKEY).Algorithm, + } + ourRRSIG.Expiration, _ = StringToTime("20100909102025") + ourRRSIG.Inception, _ = StringToTime("20100812102025") + err = ourRRSIG.Sign(priv.(*ecdsa.PrivateKey), []RR{rrA}) + if err != nil { + t.Fatal(err) + } + + if err = ourRRSIG.Verify(rrDNSKEY.(*DNSKEY), []RR{rrA}); err != nil { + t.Errorf("failure to validate our RRSIG: %v", err) + } + + // Signatures are randomized + rrRRSIG.(*RRSIG).Signature = "" + ourRRSIG.Signature = "" + if !reflect.DeepEqual(ourRRSIG, rrRRSIG.(*RRSIG)) { + t.Fatalf("RRSIG record differs:\n%v\n%v", ourRRSIG, rrRRSIG.(*RRSIG)) + } +} + +func TestInvalidRRSet(t *testing.T) { + goodRecords := make([]RR, 2) + goodRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + goodRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}} + + // Generate key + keyname := "cloudflare.com." 
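+	// A valid RRset shares owner name, type, and class; the bad record sets
+	// below violate each of the three in turn.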
+	key := &DNSKEY{
+		Hdr:       RR_Header{Name: keyname, Rrtype: TypeDNSKEY, Class: ClassINET, Ttl: 0},
+		Algorithm: ECDSAP256SHA256,
+		Flags:     ZONE,
+		Protocol:  3,
+	}
+	privatekey, err := key.Generate(256)
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+
+	// Need to fill in: Inception, Expiration, KeyTag, SignerName and Algorithm
+	curTime := time.Now()
+	signature := &RRSIG{
+		Inception:  uint32(curTime.Unix()),
+		Expiration: uint32(curTime.Add(time.Hour).Unix()),
+		KeyTag:     key.KeyTag(),
+		SignerName: keyname,
+		Algorithm:  ECDSAP256SHA256,
+	}
+
+	// Inconsistent name between records
+	badRecords := make([]RR, 2)
+	badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+	badRecords[1] = &TXT{Hdr: RR_Header{Name: "nama.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"_o/"}}
+
+	if IsRRset(badRecords) {
+		t.Fatal("Record set with inconsistent names considered valid")
+	}
+
+	badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+	badRecords[1] = &A{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeA, Class: ClassINET, Ttl: 0}}
+
+	if IsRRset(badRecords) {
+		t.Fatal("Record set with inconsistent record types considered valid")
+	}
+
+	badRecords[0] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}}
+	badRecords[1] = &TXT{Hdr: RR_Header{Name: "name.cloudflare.com.", Rrtype: TypeTXT, Class: ClassCHAOS, Ttl: 0}, Txt: []string{"_o/"}}
+
+	if IsRRset(badRecords) {
+		t.Fatal("Record set with inconsistent record class considered valid")
+	}
+
+	// Sign the good record set and then make sure verification fails on the bad record set
+	if err := signature.Sign(privatekey.(crypto.Signer), goodRecords); err != nil {
+		t.Fatal("Signing good records failed")
+	}
+
+	if err := signature.Verify(key, badRecords); err != ErrRRset {
+		t.Fatal("Verification did not return ErrRRset with inconsistent records")
+	}
+}
diff --git a/vendor/github.com/miekg/dns/dyn_test.go b/vendor/github.com/miekg/dns/dyn_test.go
new file mode 100644
index 0000000000..09986a5e4e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/dyn_test.go
@@ -0,0 +1,3 @@
+package dns
+
+// Find better solution
diff --git a/vendor/github.com/miekg/dns/edns_test.go b/vendor/github.com/miekg/dns/edns_test.go
new file mode 100644
index 0000000000..5fd75abb45
--- /dev/null
+++ b/vendor/github.com/miekg/dns/edns_test.go
@@ -0,0 +1,32 @@
+package dns
+
+import "testing"
+
+func TestOPTTtl(t *testing.T) {
+	e := &OPT{}
+	e.Hdr.Name = "."
+	e.Hdr.Rrtype = TypeOPT
+
+	if e.Do() {
+		t.Errorf("DO bit should be zero")
+	}
+
+	e.SetDo()
+	if !e.Do() {
+		t.Errorf("DO bit should be non-zero")
+	}
+
+	if e.Version() != 0 {
+		t.Errorf("version should be zero")
+	}
+
+	e.SetVersion(42)
+	if e.Version() != 42 {
+		t.Errorf("set 42, expected %d, got %d", 42, e.Version())
+	}
+
+	e.SetExtendedRcode(42)
+	if e.ExtendedRcode() != 42 {
+		t.Errorf("set 42, expected %d, got %d", 42, e.ExtendedRcode())
+	}
+}
diff --git a/vendor/github.com/miekg/dns/example_test.go b/vendor/github.com/miekg/dns/example_test.go
new file mode 100644
index 0000000000..64c14962cd
--- /dev/null
+++ b/vendor/github.com/miekg/dns/example_test.go
@@ -0,0 +1,146 @@
+package dns_test
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"net"
+
+	"github.com/miekg/dns"
+)
+
+// Retrieve the MX records for miek.nl.
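+// The nameserver and port are taken from the system's /etc/resolv.conf.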
+func ExampleMX() {
+	config, _ := dns.ClientConfigFromFile("/etc/resolv.conf")
+	c := new(dns.Client)
+	m := new(dns.Msg)
+	m.SetQuestion("miek.nl.", dns.TypeMX)
+	m.RecursionDesired = true
+	r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port)
+	if err != nil {
+		return
+	}
+	if r.Rcode != dns.RcodeSuccess {
+		return
+	}
+	for _, a := range r.Answer {
+		if mx, ok := a.(*dns.MX); ok {
+			fmt.Printf("%s\n", mx.String())
+		}
+	}
+}
+
+// Retrieve the DNSKEY records of a zone and convert them
+// to DS records for SHA1, SHA256 and SHA384.
+func ExampleDS() {
+	config, _ := dns.ClientConfigFromFile("/etc/resolv.conf")
+	c := new(dns.Client)
+	m := new(dns.Msg)
+	zone := "miek.nl"
+	m.SetQuestion(dns.Fqdn(zone), dns.TypeDNSKEY)
+	m.SetEdns0(4096, true)
+	r, _, err := c.Exchange(m, config.Servers[0]+":"+config.Port)
+	if err != nil {
+		return
+	}
+	if r.Rcode != dns.RcodeSuccess {
+		return
+	}
+	for _, k := range r.Answer {
+		if key, ok := k.(*dns.DNSKEY); ok {
+			for _, alg := range []uint8{dns.SHA1, dns.SHA256, dns.SHA384} {
+				fmt.Printf("%s; %d\n", key.ToDS(alg).String(), key.Flags)
+			}
+		}
+	}
+}
+
+const TypeAPAIR = 0x0F99
+
+type APAIR struct {
+	addr [2]net.IP
+}
+
+func NewAPAIR() dns.PrivateRdata { return new(APAIR) }
+
+func (rd *APAIR) String() string { return rd.addr[0].String() + " " + rd.addr[1].String() }
+func (rd *APAIR) Parse(txt []string) error {
+	if len(txt) != 2 {
+		return errors.New("two addresses required for APAIR")
+	}
+	for i, s := range txt {
+		ip := net.ParseIP(s)
+		if ip == nil {
+			return errors.New("invalid IP in APAIR text representation")
+		}
+		rd.addr[i] = ip
+	}
+	return nil
+}
+
+func (rd *APAIR) Pack(buf []byte) (int, error) {
+	b := append([]byte(rd.addr[0]), []byte(rd.addr[1])...)
+	n := copy(buf, b)
+	if n != len(b) {
+		return n, dns.ErrBuf
+	}
+	return n, nil
+}
+
+func (rd *APAIR) Unpack(buf []byte) (int, error) {
+	ln := net.IPv4len * 2
+	if len(buf) != ln {
+		return 0, errors.New("invalid length of APAIR rdata")
+	}
+	cp := make([]byte, ln)
+	copy(cp, buf) // clone bytes to use them in IPs
+
+	rd.addr[0] = net.IP(cp[:4]) // bytes 0-3: first IPv4 address
+	rd.addr[1] = net.IP(cp[4:]) // bytes 4-7: second IPv4 address
+
+	return len(buf), nil
+}
+
+func (rd *APAIR) Copy(dest dns.PrivateRdata) error {
+	cp := make([]byte, rd.Len())
+	_, err := rd.Pack(cp)
+	if err != nil {
+		return err
+	}
+
+	d := dest.(*APAIR)
+	d.addr[0] = net.IP(cp[:4])
+	d.addr[1] = net.IP(cp[4:])
+	return nil
+}
+
+func (rd *APAIR) Len() int {
+	return net.IPv4len * 2
+}
+
+func ExamplePrivateHandle() {
+	dns.PrivateHandle("APAIR", TypeAPAIR, NewAPAIR)
+	defer dns.PrivateHandleRemove(TypeAPAIR)
+
+	rr, err := dns.NewRR("miek.nl. APAIR (1.2.3.4 1.2.3.5)")
+	if err != nil {
+		log.Fatal("could not parse APAIR record: ", err)
+	}
+	fmt.Println(rr)
+	// Output: miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5
+
+	m := new(dns.Msg)
+	m.Id = 12345
+	m.SetQuestion("miek.nl.", TypeAPAIR)
+	m.Answer = append(m.Answer, rr)
+
+	fmt.Println(m)
+	// ;; opcode: QUERY, status: NOERROR, id: 12345
+	// ;; flags: rd; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 0
+	//
+	// ;; QUESTION SECTION:
+	// ;miek.nl. IN APAIR
+	//
+	// ;; ANSWER SECTION:
+	// miek.nl. 3600 IN APAIR 1.2.3.4 1.2.3.5
+}
diff --git a/vendor/github.com/miekg/dns/fuzz_test.go b/vendor/github.com/miekg/dns/fuzz_test.go
new file mode 100644
index 0000000000..255869730e
--- /dev/null
+++ b/vendor/github.com/miekg/dns/fuzz_test.go
@@ -0,0 +1,25 @@
+package dns
+
+import "testing"
+
+func TestFuzzString(t *testing.T) {
+	testcases := []string{"", " MINFO ", " RP ", " NSEC 0 0", " \" NSEC 0 0\"", " \" MINFO \"",
+		";a ", ";a����������",
+		" NSAP O ", " NSAP N ",
+		" TYPE4 TYPE6a789a3bc0045c8a5fb42c7d1bd998f5444 IN 9579b47d46817afbd17273e6",
+		" TYPE45 3 3 4147994 TYPE\\(\\)\\)\\(\\)\\(\\(\\)\\(\\)\\)\\)\\(\\)\\(\\)\\(\\(\\R 948\"\")\\(\\)\\)\\)\\(\\ ",
+		"$GENERATE 0-3 ${441189,5039418474430,o}",
+		"$INCLUDE 00 TYPE00000000000n ",
+		"$INCLUDE PE4 TYPE061463623/727071511 \\(\\)\\$GENERATE 6-462/0",
+	}
+	for i, tc := range testcases {
+		rr, err := NewRR(tc)
+		if err == nil {
+			// rr can be nil because we can (for instance) just parse a comment
+			if rr == nil {
+				continue
+			}
+			t.Fatalf("parsed malformed RR %d: %s", i, rr.String())
+		}
+	}
+}
diff --git a/vendor/github.com/miekg/dns/issue_test.go b/vendor/github.com/miekg/dns/issue_test.go
new file mode 100644
index 0000000000..3025fc98cb
--- /dev/null
+++ b/vendor/github.com/miekg/dns/issue_test.go
@@ -0,0 +1,23 @@
+package dns
+
+// Tests that each address one specific issue.
+
+import "testing"
+
+func TestTCPRtt(t *testing.T) {
+	m := new(Msg)
+	m.RecursionDesired = true
+	m.SetQuestion("example.org.", TypeA)
+
+	c := &Client{}
+	for _, proto := range []string{"udp", "tcp"} {
+		c.Net = proto
+		_, rtt, err := c.Exchange(m, "8.8.4.4:53")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if rtt == 0 {
+			t.Fatalf("expected a non-zero rtt for %s, got zero", c.Net)
+		}
+	}
+}
diff --git a/vendor/github.com/miekg/dns/labels_test.go b/vendor/github.com/miekg/dns/labels_test.go
new file mode 100644
index 0000000000..536757d52f
--- /dev/null
+++ b/vendor/github.com/miekg/dns/labels_test.go
@@ -0,0 +1,200 @@
+package dns
+
+import "testing"
+
+func TestCompareDomainName(t *testing.T) {
+	s1 := "www.miek.nl."
+	s2 := "miek.nl."
+	s3 := "www.bla.nl."
+	s4 := "nl.www.bla."
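+	// CompareDomainName returns the number of labels the two names share,
+	// counted from the rightmost label.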
+	s5 := "nl"
+	s6 := "miek.nl"
+
+	if CompareDomainName(s1, s2) != 2 {
+		t.Errorf("%s with %s should be %d", s1, s2, 2)
+	}
+	if CompareDomainName(s1, s3) != 1 {
+		t.Errorf("%s with %s should be %d", s1, s3, 1)
+	}
+	if CompareDomainName(s3, s4) != 0 {
+		t.Errorf("%s with %s should be %d", s3, s4, 0)
+	}
+	// Tests with names that are not fully qualified
+	if CompareDomainName(s1, s5) != 1 {
+		t.Errorf("%s with %s should be %d", s1, s5, 1)
+	}
+	if CompareDomainName(s1, s6) != 2 {
+		t.Errorf("%s with %s should be %d", s1, s6, 2)
+	}
+
+	if CompareDomainName(s1, ".") != 0 {
+		t.Errorf("%s with %s should be %d", s1, ".", 0)
+	}
+	if CompareDomainName(".", ".") != 0 {
+		t.Errorf("%s with %s should be %d", ".", ".", 0)
+	}
+}
+
+func TestSplit(t *testing.T) {
+	splitter := map[string]int{
+		"www.miek.nl.":    3,
+		"www.miek.nl":     3,
+		"www..miek.nl":    4,
+		`www\.miek.nl.`:   2,
+		`www\\.miek.nl.`:  3,
+		".":               0,
+		"nl.":             1,
+		"nl":              1,
+		"com.":            1,
+		".com.":           2,
+	}
+	for s, i := range splitter {
+		if x := len(Split(s)); x != i {
+			t.Errorf("labels should be %d, got %d: %s %v", i, x, s, Split(s))
+		} else {
+			t.Logf("%s %v", s, Split(s))
+		}
+	}
+}
+
+func TestSplit2(t *testing.T) {
+	splitter := map[string][]int{
+		"www.miek.nl.": {0, 4, 9},
+		"www.miek.nl":  {0, 4, 9},
+		"nl":           {0},
+	}
+	for s, i := range splitter {
+		x := Split(s)
+		switch len(i) {
+		case 1:
+			if x[0] != i[0] {
+				t.Errorf("labels should be %v, got %v: %s", i, x, s)
+			}
+		default:
+			if x[0] != i[0] || x[1] != i[1] || x[2] != i[2] {
+				t.Errorf("labels should be %v, got %v: %s", i, x, s)
+			}
+		}
+	}
+}
+
+func TestPrevLabel(t *testing.T) {
+	type prev struct {
+		string
+		int
+	}
+	prever := map[prev]int{
+		prev{"www.miek.nl.", 0}: 12,
+		prev{"www.miek.nl.", 1}: 9,
+		prev{"www.miek.nl.", 2}: 4,
+
+		prev{"www.miek.nl", 0}: 11,
+		prev{"www.miek.nl", 1}: 9,
+		prev{"www.miek.nl", 2}: 4,
+
+		prev{"www.miek.nl.", 5}: 0,
+		prev{"www.miek.nl", 5}:  0,
+
+		prev{"www.miek.nl.", 3}: 0,
+		prev{"www.miek.nl", 3}:  0,
+	}
+	for s, i := range prever {
+		x, ok := PrevLabel(s.string, s.int)
+		if i != x {
+			t.Errorf("label should be %d, got %d, %t: PrevLabel(%q, %d)", i, x, ok, s.string, s.int)
+		}
+	}
+}
+
+func TestCountLabel(t *testing.T) {
+	splitter := map[string]int{
+		"www.miek.nl.": 3,
+		"www.miek.nl":  3,
+		"nl":           1,
+		".":            0,
+	}
+	for s, i := range splitter {
+		x := CountLabel(s)
+		if x != i {
+			t.Errorf("CountLabel should have %d, got %d", i, x)
+		}
+	}
+}
+
+func TestSplitDomainName(t *testing.T) {
+	labels := map[string][]string{
+		"miek.nl":       {"miek", "nl"},
+		".":             nil,
+		"www.miek.nl.":  {"www", "miek", "nl"},
+		"www.miek.nl":   {"www", "miek", "nl"},
+		"www..miek.nl":  {"www", "", "miek", "nl"},
+		`www\.miek.nl`:  {`www\.miek`, "nl"},
+		`www\\.miek.nl`: {`www\\`, "miek", "nl"},
+		".www.miek.nl.": {"", "www", "miek", "nl"},
+	}
+domainLoop:
+	for domain, splits := range labels {
+		parts := SplitDomainName(domain)
+		if len(parts) != len(splits) {
+			t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
+			continue domainLoop
+		}
+		for i := range parts {
+			if parts[i] != splits[i] {
+				t.Errorf("SplitDomainName returned %v for %s, expected %v", parts, domain, splits)
+				continue domainLoop
+			}
+		}
+	}
+}
+
+func TestIsDomainName(t *testing.T) {
+	type ret struct {
+		ok  bool
+		lab int
+	}
+	names := map[string]*ret{
+		"..":               {false, 1},
+		"@.":               {true, 1},
+		"www.example.com":  {true, 3},
+		"www.e%ample.com":  {true, 3},
+		"www.example.com.": {true, 3},
+		"mi\\k.nl.":        {true, 2},
+		"mi\\k.nl":         {true, 2},
+	}
+	for d, ok := range names {
+		l, k := IsDomainName(d)
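+		// IsDomainName returns the number of labels in d and whether d is
+		// syntactically valid as a domain name.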
+		if ok.ok != k || ok.lab != l {
+			t.Errorf(" got %v %d for %s ", k, l, d)
+			t.Errorf("have %v %d for %s ", ok.ok, ok.lab, d)
+		}
+	}
+}
+
+func BenchmarkSplitLabels(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		Split("www.example.com")
+	}
+}
+
+func BenchmarkLenLabels(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		CountLabel("www.example.com")
+	}
+}
+
+func BenchmarkCompareLabels(b *testing.B) {
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		CompareDomainName("www.example.com", "aa.example.com")
+	}
+}
+
+func BenchmarkIsSubDomain(b *testing.B) {
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		IsSubDomain("www.example.com", "aa.example.com")
+		IsSubDomain("example.com", "aa.example.com")
+		IsSubDomain("miek.nl", "aa.example.com")
+	}
+}
diff --git a/vendor/github.com/miekg/dns/nsecx_test.go b/vendor/github.com/miekg/dns/nsecx_test.go
new file mode 100644
index 0000000000..93e0c63fce
--- /dev/null
+++ b/vendor/github.com/miekg/dns/nsecx_test.go
@@ -0,0 +1,29 @@
+package dns
+
+import (
+	"testing"
+)
+
+func TestPackNsec3(t *testing.T) {
+	nsec3 := HashName("dnsex.nl.", SHA1, 0, "DEAD")
+	if nsec3 != "ROCCJAE8BJJU7HN6T7NG3TNM8ACRS87J" {
+		t.Error(nsec3)
+	}
+
+	nsec3 = HashName("a.b.c.example.org.", SHA1, 2, "DEAD")
+	if nsec3 != "6LQ07OAHBTOOEU2R9ANI2AT70K5O0RCG" {
+		t.Error(nsec3)
+	}
+}
+
+func TestNsec3(t *testing.T) {
+	// examples taken from .nl
+	nsec3, _ := NewRR("39p91242oslggest5e6a7cci4iaeqvnk.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6 NS DS RRSIG")
+	if !nsec3.(*NSEC3).Cover("snasajsksasasa.nl.") { // 39p94jrinub66hnpem8qdpstrec86pg3
+		t.Error("39p94jrinub66hnpem8qdpstrec86pg3. should be covered by 39p91242oslggest5e6a7cci4iaeqvnk.nl. - 39P99DCGG0MDLARTCRMCF6OFLLUL7PR6")
+	}
+	nsec3, _ = NewRR("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. IN NSEC3 1 1 5 F10E9F7EA83FC8F3 SK4F38CQ0ATIEI8MH3RGD0P5I4II6QAN NS SOA TXT RRSIG DNSKEY NSEC3PARAM")
+	if !nsec3.(*NSEC3).Match("nl.") { // sk4e8fj94u78smusb40o1n0oltbblu2r.nl.
+		t.Error("sk4e8fj94u78smusb40o1n0oltbblu2r.nl. should match sk4e8fj94u78smusb40o1n0oltbblu2r.nl.")
+	}
+}
diff --git a/vendor/github.com/miekg/dns/parse_test.go b/vendor/github.com/miekg/dns/parse_test.go
new file mode 100644
index 0000000000..a700f64441
--- /dev/null
+++ b/vendor/github.com/miekg/dns/parse_test.go
@@ -0,0 +1,1524 @@
+package dns
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"encoding/hex"
+	"fmt"
+	"math/rand"
+	"net"
+	"reflect"
+	"strconv"
+	"strings"
+	"testing"
+	"testing/quick"
+	"time"
+)
+
+func TestDotInName(t *testing.T) {
+	buf := make([]byte, 20)
+	PackDomainName("aa\\.bb.nl.", buf, 0, nil, false)
+	// index 3 must be a real dot
+	if buf[3] != '.' {
+		t.Error("dot should be a real dot")
+	}
+
+	if buf[6] != 2 {
+		t.Error("this must have the value 2")
+	}
+	dom, _, _ := UnpackDomainName(buf, 0)
+	// printing it should yield the backslash again
+	if dom != "aa\\.bb.nl." {
+		t.Error("dot should have been escaped: ", dom)
+	}
+}
+
+func TestDotLastInLabel(t *testing.T) {
+	sample := "aa\\..au."
+	buf := make([]byte, 20)
+	_, err := PackDomainName(sample, buf, 0, nil, false)
+	if err != nil {
+		t.Fatalf("unexpected error packing domain: %v", err)
+	}
+	dom, _, _ := UnpackDomainName(buf, 0)
+	if dom != sample {
+		t.Fatalf("unpacked domain `%s' doesn't match packed domain", dom)
+	}
+}
+
+func TestTooLongDomainName(t *testing.T) {
+	l := "aaabbbcccdddeeefffggghhhiiijjjkkklllmmmnnnooopppqqqrrrsssttt."
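+	// Seven copies of this 61-octet chunk (a 60-byte label plus a dot) come to
+	// 427 octets, well past the 255-octet maximum for a domain name.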
+ dom := l + l + l + l + l + l + l + _, err := NewRR(dom + " IN A 127.0.0.1") + if err == nil { + t.Error("should be too long") + } else { + t.Logf("error is %v", err) + } + _, err = NewRR("..com. IN A 127.0.0.1") + if err == nil { + t.Error("should fail") + } else { + t.Logf("error is %v", err) + } +} + +func TestDomainName(t *testing.T) { + tests := []string{"r\\.gieben.miek.nl.", "www\\.www.miek.nl.", + "www.*.miek.nl.", "www.*.miek.nl.", + } + dbuff := make([]byte, 40) + + for _, ts := range tests { + if _, err := PackDomainName(ts, dbuff, 0, nil, false); err != nil { + t.Error("not a valid domain name") + continue + } + n, _, err := UnpackDomainName(dbuff, 0) + if err != nil { + t.Error("failed to unpack packed domain name") + continue + } + if ts != n { + t.Errorf("must be equal: in: %s, out: %s", ts, n) + } + } +} + +func TestDomainNameAndTXTEscapes(t *testing.T) { + tests := []byte{'.', '(', ')', ';', ' ', '@', '"', '\\', '\t', '\r', '\n', 0, 255} + for _, b := range tests { + rrbytes := []byte{ + 1, b, 0, // owner + byte(TypeTXT >> 8), byte(TypeTXT), + byte(ClassINET >> 8), byte(ClassINET), + 0, 0, 0, 1, // TTL + 0, 2, 1, b, // Data + } + rr1, _, err := UnpackRR(rrbytes, 0) + if err != nil { + panic(err) + } + s := rr1.String() + rr2, err := NewRR(s) + if err != nil { + t.Errorf("Error parsing unpacked RR's string: %v", err) + t.Errorf(" Bytes: %v", rrbytes) + t.Errorf("String: %v", s) + } + repacked := make([]byte, len(rrbytes)) + if _, err := PackRR(rr2, repacked, 0, nil, false); err != nil { + t.Errorf("error packing parsed RR: %v", err) + t.Errorf(" original Bytes: %v", rrbytes) + t.Errorf("unpacked Struct: %v", rr1) + t.Errorf(" parsed Struct: %v", rr2) + } + if !bytes.Equal(repacked, rrbytes) { + t.Error("packed bytes don't match original bytes") + t.Errorf(" original bytes: %v", rrbytes) + t.Errorf(" packed bytes: %v", repacked) + t.Errorf("unpacked struct: %v", rr1) + t.Errorf(" parsed struct: %v", rr2) + } + } +} + +func TestTXTEscapeParsing(t *testing.T) { + test := [][]string{ + {`";"`, `";"`}, + {`\;`, `";"`}, + {`"\t"`, `"\t"`}, + {`"\r"`, `"\r"`}, + {`"\ "`, `" "`}, + {`"\;"`, `";"`}, + {`"\;\""`, `";\""`}, + {`"\(a\)"`, `"(a)"`}, + {`"\(a)"`, `"(a)"`}, + {`"(a\)"`, `"(a)"`}, + {`"(a)"`, `"(a)"`}, + {`"\048"`, `"0"`}, + {`"\` + "\n" + `"`, `"\n"`}, + {`"\` + "\r" + `"`, `"\r"`}, + {`"\` + "\x11" + `"`, `"\017"`}, + {`"\'"`, `"'"`}, + } + for _, s := range test { + rr, err := NewRR(fmt.Sprintf("example.com. IN TXT %v", s[0])) + if err != nil { + t.Errorf("could not parse %v TXT: %s", s[0], err) + continue + } + + txt := sprintTxt(rr.(*TXT).Txt) + if txt != s[1] { + t.Errorf("mismatch after parsing `%v` TXT record: `%v` != `%v`", s[0], txt, s[1]) + } + } +} + +func GenerateDomain(r *rand.Rand, size int) []byte { + dnLen := size % 70 // artificially limit size so there's less to intrepret if a failure occurs + var dn []byte + done := false + for i := 0; i < dnLen && !done; { + max := dnLen - i + if max > 63 { + max = 63 + } + lLen := max + if lLen != 0 { + lLen = int(r.Int31()) % max + } + done = lLen == 0 + if done { + continue + } + l := make([]byte, lLen+1) + l[0] = byte(lLen) + for j := 0; j < lLen; j++ { + l[j+1] = byte(rand.Int31()) + } + dn = append(dn, l...) 
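+		// step over the label just written: one length byte plus lLen data bytes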
+ i += 1 + lLen + } + return append(dn, 0) +} + +func TestDomainQuick(t *testing.T) { + r := rand.New(rand.NewSource(0)) + f := func(l int) bool { + db := GenerateDomain(r, l) + ds, _, err := UnpackDomainName(db, 0) + if err != nil { + panic(err) + } + buf := make([]byte, 255) + off, err := PackDomainName(ds, buf, 0, nil, false) + if err != nil { + t.Errorf("error packing domain: %v", err) + t.Errorf(" bytes: %v", db) + t.Errorf("string: %v", ds) + return false + } + if !bytes.Equal(db, buf[:off]) { + t.Errorf("repacked domain doesn't match original:") + t.Errorf("src bytes: %v", db) + t.Errorf(" string: %v", ds) + t.Errorf("out bytes: %v", buf[:off]) + return false + } + return true + } + if err := quick.Check(f, nil); err != nil { + t.Error(err) + } +} + +func GenerateTXT(r *rand.Rand, size int) []byte { + rdLen := size % 300 // artificially limit size so there's less to intrepret if a failure occurs + var rd []byte + for i := 0; i < rdLen; { + max := rdLen - 1 + if max > 255 { + max = 255 + } + sLen := max + if max != 0 { + sLen = int(r.Int31()) % max + } + s := make([]byte, sLen+1) + s[0] = byte(sLen) + for j := 0; j < sLen; j++ { + s[j+1] = byte(rand.Int31()) + } + rd = append(rd, s...) + i += 1 + sLen + } + return rd +} + +// Ok, 2 things. 1) this test breaks with the new functionality of splitting up larger txt +// chunks into 255 byte pieces. 2) I don't like the random nature of this thing, because I can't +// place the quotes where they need to be. +// So either add some code the places the quotes in just the right spots, make this non random +// or do something else. +// Disabled for now. (miek) +func testTXTRRQuick(t *testing.T) { + s := rand.NewSource(0) + r := rand.New(s) + typeAndClass := []byte{ + byte(TypeTXT >> 8), byte(TypeTXT), + byte(ClassINET >> 8), byte(ClassINET), + 0, 0, 0, 1, // TTL + } + f := func(l int) bool { + owner := GenerateDomain(r, l) + rdata := GenerateTXT(r, l) + rrbytes := make([]byte, 0, len(owner)+2+2+4+2+len(rdata)) + rrbytes = append(rrbytes, owner...) + rrbytes = append(rrbytes, typeAndClass...) + rrbytes = append(rrbytes, byte(len(rdata)>>8)) + rrbytes = append(rrbytes, byte(len(rdata))) + rrbytes = append(rrbytes, rdata...) 
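+		// rrbytes is now a complete wire-format RR:
+		// owner | type | class | TTL | rdlength | rdata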
+ rr, _, err := UnpackRR(rrbytes, 0) + if err != nil { + panic(err) + } + buf := make([]byte, len(rrbytes)*3) + off, err := PackRR(rr, buf, 0, nil, false) + if err != nil { + t.Errorf("pack Error: %v\nRR: %v", err, rr) + return false + } + buf = buf[:off] + if !bytes.Equal(buf, rrbytes) { + t.Errorf("packed bytes don't match original bytes") + t.Errorf("src bytes: %v", rrbytes) + t.Errorf(" struct: %v", rr) + t.Errorf("out bytes: %v", buf) + return false + } + if len(rdata) == 0 { + // string'ing won't produce any data to parse + return true + } + rrString := rr.String() + rr2, err := NewRR(rrString) + if err != nil { + t.Errorf("error parsing own output: %v", err) + t.Errorf("struct: %v", rr) + t.Errorf("string: %v", rrString) + return false + } + if rr2.String() != rrString { + t.Errorf("parsed rr.String() doesn't match original string") + t.Errorf("original: %v", rrString) + t.Errorf(" parsed: %v", rr2.String()) + return false + } + + buf = make([]byte, len(rrbytes)*3) + off, err = PackRR(rr2, buf, 0, nil, false) + if err != nil { + t.Errorf("error packing parsed rr: %v", err) + t.Errorf("unpacked Struct: %v", rr) + t.Errorf(" string: %v", rrString) + t.Errorf(" parsed Struct: %v", rr2) + return false + } + buf = buf[:off] + if !bytes.Equal(buf, rrbytes) { + t.Errorf("parsed packed bytes don't match original bytes") + t.Errorf(" source bytes: %v", rrbytes) + t.Errorf("unpacked struct: %v", rr) + t.Errorf(" string: %v", rrString) + t.Errorf(" parsed struct: %v", rr2) + t.Errorf(" repacked bytes: %v", buf) + return false + } + return true + } + c := &quick.Config{MaxCountScale: 10} + if err := quick.Check(f, c); err != nil { + t.Error(err) + } +} + +func TestParseDirectiveMisc(t *testing.T) { + tests := map[string]string{ + "$ORIGIN miek.nl.\na IN NS b": "a.miek.nl.\t3600\tIN\tNS\tb.miek.nl.", + "$TTL 2H\nmiek.nl. IN NS b.": "miek.nl.\t7200\tIN\tNS\tb.", + "miek.nl. 1D IN NS b.": "miek.nl.\t86400\tIN\tNS\tb.", + `name. IN SOA a6.nstld.com. hostmaster.nic.name. ( + 203362132 ; serial + 5m ; refresh (5 minutes) + 5m ; retry (5 minutes) + 2w ; expire (2 weeks) + 300 ; minimum (5 minutes) +)`: "name.\t3600\tIN\tSOA\ta6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300", + ". 3600000 IN NS ONE.MY-ROOTS.NET.": ".\t3600000\tIN\tNS\tONE.MY-ROOTS.NET.", + "ONE.MY-ROOTS.NET. 3600000 IN A 192.168.1.1": "ONE.MY-ROOTS.NET.\t3600000\tIN\tA\t192.168.1.1", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestNSEC(t *testing.T) { + nsectests := map[string]string{ + "nl. IN NSEC3PARAM 1 0 5 30923C44C6CBBB8F": "nl.\t3600\tIN\tNSEC3PARAM\t1 0 5 30923C44C6CBBB8F", + "p2209hipbpnm681knjnu0m1febshlv4e.nl. IN NSEC3 1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM": "p2209hipbpnm681knjnu0m1febshlv4e.nl.\t3600\tIN\tNSEC3\t1 1 5 30923C44C6CBBB8F P90DG1KE8QEAN0B01613LHQDG0SOJ0TA NS SOA TXT RRSIG DNSKEY NSEC3PARAM", + "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC", + "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSEC TYPE65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. A RRSIG NSEC TYPE65534", + "localhost.dnssex.nl. IN NSEC www.dnssex.nl. A RRSIG NSec Type65534": "localhost.dnssex.nl.\t3600\tIN\tNSEC\twww.dnssex.nl. 
A RRSIG NSEC TYPE65534", + } + for i, o := range nsectests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseLOC(t *testing.T) { + lt := map[string]string{ + "SW1A2AA.find.me.uk. LOC 51 30 12.748 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 30 12.748 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", + "SW1A2AA.find.me.uk. LOC 51 0 0.0 N 00 07 39.611 W 0.00m 0.00m 0.00m 0.00m": "SW1A2AA.find.me.uk.\t3600\tIN\tLOC\t51 00 0.000 N 00 07 39.611 W 0m 0.00m 0.00m 0.00m", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseDS(t *testing.T) { + dt := map[string]string{ + "example.net. 3600 IN DS 40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B 2071398F": "example.net.\t3600\tIN\tDS\t40692 12 3 22261A8B0E0D799183E35E24E2AD6BB58533CBA7E3B14D659E9CA09B2071398F", + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestQuotes(t *testing.T) { + tests := map[string]string{ + `t.example.com. IN TXT "a bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a bc\"", + `t.example.com. IN TXT "a + bc"`: "t.example.com.\t3600\tIN\tTXT\t\"a\\n bc\"", + `t.example.com. IN TXT ""`: "t.example.com.\t3600\tIN\tTXT\t\"\"", + `t.example.com. IN TXT "a"`: "t.example.com.\t3600\tIN\tTXT\t\"a\"", + `t.example.com. IN TXT "aa"`: "t.example.com.\t3600\tIN\tTXT\t\"aa\"", + `t.example.com. IN TXT "aaa" ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", + `t.example.com. IN TXT "abc" "DEF"`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", + `t.example.com. IN TXT "abc" ( "DEF" )`: "t.example.com.\t3600\tIN\tTXT\t\"abc\" \"DEF\"", + `t.example.com. IN TXT aaa ;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa \"", + `t.example.com. IN TXT aaa aaa;`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"", + `t.example.com. IN TXT aaa aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa aaa\"", + `t.example.com. IN TXT aaa`: "t.example.com.\t3600\tIN\tTXT\t\"aaa\"", + "cid.urn.arpa. NAPTR 100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"z3950+I2L+I2C\" \"\" _z3950._tcp.gatech.edu.", + "cid.urn.arpa. NAPTR 100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"rcds+I2C\" \"\" _rcds._udp.gatech.edu.", + "cid.urn.arpa. NAPTR 100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 50 \"s\" \"http+I2L+I2C+I2R\" \"\" _http._tcp.gatech.edu.", + "cid.urn.arpa. 
NAPTR 100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .": "cid.urn.arpa.\t3600\tIN\tNAPTR\t100 10 \"\" \"\" \"/urn:cid:.+@([^\\.]+\\.)(.*)$/\\2/i\" .", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseClass(t *testing.T) { + tests := map[string]string{ + "t.example.com. IN A 127.0.0.1": "t.example.com. 3600 IN A 127.0.0.1", + "t.example.com. CS A 127.0.0.1": "t.example.com. 3600 CS A 127.0.0.1", + "t.example.com. CH A 127.0.0.1": "t.example.com. 3600 CH A 127.0.0.1", + // ClassANY can not occur in zone files + // "t.example.com. ANY A 127.0.0.1": "t.example.com. 3600 ANY A 127.0.0.1", + "t.example.com. NONE A 127.0.0.1": "t.example.com. 3600 NONE A 127.0.0.1", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is\n`%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestBrace(t *testing.T) { + tests := map[string]string{ + "(miek.nl.) 3600 IN A 127.0.1.1": "miek.nl.\t3600\tIN\tA\t127.0.1.1", + "miek.nl. (3600) IN MX (10) elektron.atoom.net.": "miek.nl.\t3600\tIN\tMX\t10 elektron.atoom.net.", + `miek.nl. IN ( + 3600 A 127.0.0.1)`: "miek.nl.\t3600\tIN\tA\t127.0.0.1", + "(miek.nl.) (A) (127.0.2.1)": "miek.nl.\t3600\tIN\tA\t127.0.2.1", + "miek.nl A 127.0.3.1": "miek.nl.\t3600\tIN\tA\t127.0.3.1", + "_ssh._tcp.local. 60 IN (PTR) stora._ssh._tcp.local.": "_ssh._tcp.local.\t60\tIN\tPTR\tstora._ssh._tcp.local.", + "miek.nl. NS ns.miek.nl": "miek.nl.\t3600\tIN\tNS\tns.miek.nl.", + `(miek.nl.) ( + (IN) + (AAAA) + (::1) )`: "miek.nl.\t3600\tIN\tAAAA\t::1", + `(miek.nl.) ( + (IN) + (AAAA) + (::1))`: "miek.nl.\t3600\tIN\tAAAA\t::1", + "miek.nl. IN AAAA ::2": "miek.nl.\t3600\tIN\tAAAA\t::2", + `((m)(i)ek.(n)l.) (SOA) (soa.) (soa.) ( + 2009032802 ; serial + 21600 ; refresh (6 hours) + 7(2)00 ; retry (2 hours) + 604()800 ; expire (1 week) + 3600 ; minimum (1 hour) + )`: "miek.nl.\t3600\tIN\tSOA\tsoa. soa. 2009032802 21600 7200 604800 3600", + "miek\\.nl. IN A 127.0.0.10": "miek\\.nl.\t3600\tIN\tA\t127.0.0.10", + "miek.nl. IN A 127.0.0.11": "miek.nl.\t3600\tIN\tA\t127.0.0.11", + "miek.nl. A 127.0.0.12": "miek.nl.\t3600\tIN\tA\t127.0.0.12", + `miek.nl. 86400 IN SOA elektron.atoom.net. miekg.atoom.net. ( + 2009032802 ; serial + 21600 ; refresh (6 hours) + 7200 ; retry (2 hours) + 604800 ; expire (1 week) + 3600 ; minimum (1 hour) + )`: "miek.nl.\t86400\tIN\tSOA\telektron.atoom.net. miekg.atoom.net. 2009032802 21600 7200 604800 3600", + } + for i, o := range tests { + rr, err := NewRR(i) + if err != nil { + t.Errorf("failed to parse RR: %v\n\t%s", err, i) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseFailure(t *testing.T) { + tests := []string{"miek.nl. IN A 327.0.0.1", + "miek.nl. IN AAAA ::x", + "miek.nl. IN MX a0 miek.nl.", + "miek.nl aap IN MX mx.miek.nl.", + "miek.nl 200 IN mxx 10 mx.miek.nl.", + "miek.nl. inn MX 10 mx.miek.nl.", + // "miek.nl. IN CNAME ", // actually valid nowadays, zero size rdata + "miek.nl. IN CNAME ..", + "miek.nl. PA MX 10 miek.nl.", + "miek.nl. 
) IN MX 10 miek.nl.", + } + + for _, s := range tests { + _, err := NewRR(s) + if err == nil { + t.Errorf("should have triggered an error: \"%s\"", s) + } + } +} + +func TestZoneParsing(t *testing.T) { + // parse_test.db + db := ` +a.example.com. IN A 127.0.0.1 +8db7._openpgpkey.example.com. IN OPENPGPKEY mQCNAzIG +$ORIGIN a.example.com. +test IN A 127.0.0.1 + IN SSHFP 1 2 ( + BC6533CDC95A79078A39A56EA7635984ED655318ADA9 + B6159E30723665DA95BB ) +$ORIGIN b.example.com. +test IN CNAME test.a.example.com. +` + start := time.Now().UnixNano() + to := ParseZone(strings.NewReader(db), "", "parse_test.db") + var i int + for x := range to { + i++ + if x.Error != nil { + t.Error(x.Error) + continue + } + t.Log(x.RR) + } + delta := time.Now().UnixNano() - start + t.Logf("%d RRs parsed in %.2f s (%.2f RR/s)", i, float32(delta)/1e9, float32(i)/(float32(delta)/1e9)) +} + +func ExampleParseZone() { + zone := `$ORIGIN . +$TTL 3600 ; 1 hour +name IN SOA a6.nstld.com. hostmaster.nic.name. ( + 203362132 ; serial + 300 ; refresh (5 minutes) + 300 ; retry (5 minutes) + 1209600 ; expire (2 weeks) + 300 ; minimum (5 minutes) + ) +$TTL 10800 ; 3 hours +name. 10800 IN NS name. + IN NS g6.nstld.com. + 7200 NS h6.nstld.com. + 3600 IN NS j6.nstld.com. + IN 3600 NS k6.nstld.com. + NS l6.nstld.com. + NS a6.nstld.com. + NS c6.nstld.com. + NS d6.nstld.com. + NS f6.nstld.com. + NS m6.nstld.com. +( + NS m7.nstld.com. +) +$ORIGIN name. +0-0onlus NS ns7.ehiweb.it. + NS ns8.ehiweb.it. +0-g MX 10 mx01.nic + MX 10 mx02.nic + MX 10 mx03.nic + MX 10 mx04.nic +$ORIGIN 0-g.name +moutamassey NS ns01.yahoodomains.jp. + NS ns02.yahoodomains.jp. +` + to := ParseZone(strings.NewReader(zone), "", "testzone") + for x := range to { + fmt.Println(x.RR) + } + // Output: + // name. 3600 IN SOA a6.nstld.com. hostmaster.nic.name. 203362132 300 300 1209600 300 + // name. 10800 IN NS name. + // name. 10800 IN NS g6.nstld.com. + // name. 7200 IN NS h6.nstld.com. + // name. 3600 IN NS j6.nstld.com. + // name. 3600 IN NS k6.nstld.com. + // name. 10800 IN NS l6.nstld.com. + // name. 10800 IN NS a6.nstld.com. + // name. 10800 IN NS c6.nstld.com. + // name. 10800 IN NS d6.nstld.com. + // name. 10800 IN NS f6.nstld.com. + // name. 10800 IN NS m6.nstld.com. + // name. 10800 IN NS m7.nstld.com. + // 0-0onlus.name. 10800 IN NS ns7.ehiweb.it. + // 0-0onlus.name. 10800 IN NS ns8.ehiweb.it. + // 0-g.name. 10800 IN MX 10 mx01.nic.name. + // 0-g.name. 10800 IN MX 10 mx02.nic.name. + // 0-g.name. 10800 IN MX 10 mx03.nic.name. + // 0-g.name. 10800 IN MX 10 mx04.nic.name. + // moutamassey.0-g.name.name. 10800 IN NS ns01.yahoodomains.jp. + // moutamassey.0-g.name.name. 10800 IN NS ns02.yahoodomains.jp. +} + +func ExampleHIP() { + h := `www.example.com IN HIP ( 2 200100107B1A74DF365639CC39F1D578 + AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p +9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ +b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D + rvs.example.com. )` + if hip, err := NewRR(h); err == nil { + fmt.Println(hip.String()) + } + // Output: + // www.example.com. 3600 IN HIP 2 200100107B1A74DF365639CC39F1D578 AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQb1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D rvs.example.com. +} + +func TestHIP(t *testing.T) { + h := `www.example.com. 
IN HIP ( 2 200100107B1A74DF365639CC39F1D578 + AwEAAbdxyhNuSutc5EMzxTs9LBPCIkOFH8cIvM4p +9+LrV4e19WzK00+CI6zBCQTdtWsuxKbWIy87UOoJTwkUs7lBu+Upr1gsNrut79ryra+bSRGQ +b1slImA8YVJyuIDsj7kwzG7jnERNqnWxZ48AWkskmdHaVDP4BcelrTI3rMXdXF5D + rvs1.example.com. + rvs2.example.com. )` + rr, err := NewRR(h) + if err != nil { + t.Fatalf("failed to parse RR: %v", err) + } + t.Logf("RR: %s", rr) + msg := new(Msg) + msg.Answer = []RR{rr, rr} + bytes, err := msg.Pack() + if err != nil { + t.Fatalf("failed to pack msg: %v", err) + } + if err := msg.Unpack(bytes); err != nil { + t.Fatalf("failed to unpack msg: %v", err) + } + if len(msg.Answer) != 2 { + t.Fatalf("2 answers expected: %v", msg) + } + for i, rr := range msg.Answer { + rr := rr.(*HIP) + t.Logf("RR: %s", rr) + if l := len(rr.RendezvousServers); l != 2 { + t.Fatalf("2 servers expected, only %d in record %d:\n%v", l, i, msg) + } + for j, s := range []string{"rvs1.example.com.", "rvs2.example.com."} { + if rr.RendezvousServers[j] != s { + t.Fatalf("expected server %d of record %d to be %s:\n%v", j, i, s, msg) + } + } + } +} + +func ExampleSOA() { + s := "example.com. 1000 SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100" + if soa, err := NewRR(s); err == nil { + fmt.Println(soa.String()) + } + // Output: + // example.com. 1000 IN SOA master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100 +} + +func TestLineNumberError(t *testing.T) { + s := "example.com. 1000 SOA master.example.com. admin.example.com. monkey 4294967294 4294967293 4294967295 100" + if _, err := NewRR(s); err != nil { + if err.Error() != "dns: bad SOA zone parameter: \"monkey\" at line: 1:68" { + t.Error("not expecting this error: ", err) + } + } +} + +// Test with no known RR on the line +func TestLineNumberError2(t *testing.T) { + tests := map[string]string{ + "example.com. 1000 SO master.example.com. admin.example.com. 1 4294967294 4294967293 4294967295 100": "dns: expecting RR type or class, not this...: \"SO\" at line: 1:21", + "example.com 1000 IN TALINK a.example.com. b..example.com.": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:57", + "example.com 1000 IN TALINK ( a.example.com. b..example.com. )": "dns: bad TALINK NextName: \"b..example.com.\" at line: 1:60", + `example.com 1000 IN TALINK ( a.example.com. + bb..example.com. )`: "dns: bad TALINK NextName: \"bb..example.com.\" at line: 2:18", + // This is a bug, it should report an error on line 1, but the new is already processed. + `example.com 1000 IN TALINK ( a.example.com. b...example.com. + )`: "dns: bad TALINK NextName: \"b...example.com.\" at line: 2:1"} + + for in, errStr := range tests { + _, err := NewRR(in) + if err == nil { + t.Error("err is nil") + } else { + if err.Error() != errStr { + t.Errorf("%s: error should be %s is %v", in, errStr, err) + } + } + } +} + +// Test if the calculations are correct +func TestRfc1982(t *testing.T) { + // If the current time and the timestamp are more than 68 years apart + // it means the date has wrapped. 
0 is 1970 + + // fall in the current 68 year span + strtests := []string{"20120525134203", "19700101000000", "20380119031408"} + for _, v := range strtests { + if x, _ := StringToTime(v); v != TimeToString(x) { + t.Errorf("1982 arithmetic string failure %s (%s:%d)", v, TimeToString(x), x) + } + } + + inttests := map[uint32]string{0: "19700101000000", + 1 << 31: "20380119031408", + 1<<32 - 1: "21060207062815", + } + for i, v := range inttests { + if TimeToString(i) != v { + t.Errorf("1982 arithmetic int failure %d:%s (%s)", i, v, TimeToString(i)) + } + } + + // Future tests, these dates get parsed to a date within the current 136 year span + future := map[string]string{"22680119031408": "20631123173144", + "19010101121212": "20370206184028", + "19210101121212": "20570206184028", + "19500101121212": "20860206184028", + "19700101000000": "19700101000000", + "19690101000000": "21050207062816", + "29210101121212": "21040522212236", + } + for from, to := range future { + x, _ := StringToTime(from) + y := TimeToString(x) + if y != to { + t.Errorf("1982 arithmetic future failure %s:%s (%s)", from, to, y) + } + } +} + +func TestEmpty(t *testing.T) { + for range ParseZone(strings.NewReader(""), "", "") { + t.Errorf("should be empty") + } +} + +func TestLowercaseTokens(t *testing.T) { + var testrecords = []string{ + "example.org. 300 IN a 1.2.3.4", + "example.org. 300 in A 1.2.3.4", + "example.org. 300 in a 1.2.3.4", + "example.org. 300 a 1.2.3.4", + "example.org. 300 A 1.2.3.4", + "example.org. IN a 1.2.3.4", + "example.org. in A 1.2.3.4", + "example.org. in a 1.2.3.4", + "example.org. a 1.2.3.4", + "example.org. A 1.2.3.4", + "example.org. a 1.2.3.4", + "$ORIGIN example.org.\n a 1.2.3.4", + "$Origin example.org.\n a 1.2.3.4", + "$origin example.org.\n a 1.2.3.4", + "example.org. Class1 Type1 1.2.3.4", + } + for _, testrr := range testrecords { + _, err := NewRR(testrr) + if err != nil { + t.Errorf("failed to parse %#v, got %v", testrr, err) + } + } +} + +func ExampleParseZone_generate() { + // From the manual: http://www.bind9.net/manual/bind/9.3.2/Bv9ARM.ch06.html#id2566761 + zone := "$GENERATE 1-2 0 NS SERVER$.EXAMPLE.\n$GENERATE 1-8 $ CNAME $.0" + to := ParseZone(strings.NewReader(zone), "0.0.192.IN-ADDR.ARPA.", "") + for x := range to { + if x.Error == nil { + fmt.Println(x.RR.String()) + } + } + // Output: + // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER1.EXAMPLE. + // 0.0.0.192.IN-ADDR.ARPA. 3600 IN NS SERVER2.EXAMPLE. + // 1.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 1.0.0.0.192.IN-ADDR.ARPA. + // 2.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 2.0.0.0.192.IN-ADDR.ARPA. + // 3.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 3.0.0.0.192.IN-ADDR.ARPA. + // 4.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 4.0.0.0.192.IN-ADDR.ARPA. + // 5.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 5.0.0.0.192.IN-ADDR.ARPA. + // 6.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 6.0.0.0.192.IN-ADDR.ARPA. + // 7.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 7.0.0.0.192.IN-ADDR.ARPA. + // 8.0.0.192.IN-ADDR.ARPA. 3600 IN CNAME 8.0.0.0.192.IN-ADDR.ARPA. 
+} + +func TestSRVPacking(t *testing.T) { + msg := Msg{} + + things := []string{"1.2.3.4:8484", + "45.45.45.45:8484", + "84.84.84.84:8484", + } + + for i, n := range things { + h, p, err := net.SplitHostPort(n) + if err != nil { + continue + } + port := 8484 + tmp, err := strconv.Atoi(p) + if err == nil { + port = tmp + } + + rr := &SRV{ + Hdr: RR_Header{Name: "somename.", + Rrtype: TypeSRV, + Class: ClassINET, + Ttl: 5}, + Priority: uint16(i), + Weight: 5, + Port: uint16(port), + Target: h + ".", + } + + msg.Answer = append(msg.Answer, rr) + } + + _, err := msg.Pack() + if err != nil { + t.Fatalf("couldn't pack %v: %v", msg, err) + } +} + +func TestParseBackslash(t *testing.T) { + if r, err := NewRR("nul\\000gap.test.globnix.net. 600 IN A 192.0.2.10"); err != nil { + t.Errorf("could not create RR with \\000 in it") + } else { + t.Logf("parsed %s", r.String()) + } + if r, err := NewRR(`nul\000gap.test.globnix.net. 600 IN TXT "Hello\123"`); err != nil { + t.Errorf("could not create RR with \\000 in it") + } else { + t.Logf("parsed %s", r.String()) + } + if r, err := NewRR(`m\ @\ iek.nl. IN 3600 A 127.0.0.1`); err != nil { + t.Errorf("could not create RR with \\ and \\@ in it") + } else { + t.Logf("parsed %s", r.String()) + } +} + +func TestILNP(t *testing.T) { + tests := []string{ + "host1.example.com.\t3600\tIN\tNID\t10 0014:4fff:ff20:ee64", + "host1.example.com.\t3600\tIN\tNID\t20 0015:5fff:ff21:ee65", + "host2.example.com.\t3600\tIN\tNID\t10 0016:6fff:ff22:ee66", + "host1.example.com.\t3600\tIN\tL32\t10 10.1.2.0", + "host1.example.com.\t3600\tIN\tL32\t20 10.1.4.0", + "host2.example.com.\t3600\tIN\tL32\t10 10.1.8.0", + "host1.example.com.\t3600\tIN\tL64\t10 2001:0DB8:1140:1000", + "host1.example.com.\t3600\tIN\tL64\t20 2001:0DB8:2140:2000", + "host2.example.com.\t3600\tIN\tL64\t10 2001:0DB8:4140:4000", + "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet1.example.com.", + "host1.example.com.\t3600\tIN\tLP\t10 l64-subnet2.example.com.", + "host1.example.com.\t3600\tIN\tLP\t20 l32-subnet1.example.com.", + } + for _, t1 := range tests { + r, err := NewRR(t1) + if err != nil { + t.Fatalf("an error occurred: %v", err) + } else { + if t1 != r.String() { + t.Fatalf("strings should be equal %s %s", t1, r.String()) + } + } + } +} + +func TestGposEidNimloc(t *testing.T) { + dt := map[string]string{ + "444433332222111199990123000000ff. NSAP-PTR foo.bar.com.": "444433332222111199990123000000ff.\t3600\tIN\tNSAP-PTR\tfoo.bar.com.", + "lillee. IN GPOS -32.6882 116.8652 10.0": "lillee.\t3600\tIN\tGPOS\t-32.6882 116.8652 10.0", + "hinault. IN GPOS -22.6882 116.8652 250.0": "hinault.\t3600\tIN\tGPOS\t-22.6882 116.8652 250.0", + "VENERA. IN NIMLOC 75234159EAC457800920": "VENERA.\t3600\tIN\tNIMLOC\t75234159EAC457800920", + "VAXA. IN EID 3141592653589793": "VAXA.\t3600\tIN\tEID\t3141592653589793", + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestPX(t *testing.T) { + dt := map[string]string{ + "*.net2.it. IN PX 10 net2.it. PRMD-net2.ADMD-p400.C-it.": "*.net2.it.\t3600\tIN\tPX\t10 net2.it. PRMD-net2.ADMD-p400.C-it.", + "ab.net2.it. IN PX 10 ab.net2.it. O-ab.PRMD-net2.ADMDb.C-it.": "ab.net2.it.\t3600\tIN\tPX\t10 ab.net2.it. 
O-ab.PRMD-net2.ADMDb.C-it.", + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestComment(t *testing.T) { + // Comments we must see + comments := map[string]bool{"; this is comment 1": true, + "; this is comment 4": true, "; this is comment 6": true, + "; this is comment 7": true, "; this is comment 8": true} + zone := ` +foo. IN A 10.0.0.1 ; this is comment 1 +foo. IN A ( + 10.0.0.2 ; this is comment2 +) +; this is comment3 +foo. IN A 10.0.0.3 +foo. IN A ( 10.0.0.4 ); this is comment 4 + +foo. IN A 10.0.0.5 +; this is comment5 + +foo. IN A 10.0.0.6 + +foo. IN DNSKEY 256 3 5 AwEAAb+8l ; this is comment 6 +foo. IN NSEC miek.nl. TXT RRSIG NSEC; this is comment 7 +foo. IN TXT "THIS IS TEXT MAN"; this is comment 8 +` + for x := range ParseZone(strings.NewReader(zone), ".", "") { + if x.Error == nil { + if x.Comment != "" { + if _, ok := comments[x.Comment]; !ok { + t.Errorf("wrong comment %s", x.Comment) + } + } + } + } +} + +func TestEUIxx(t *testing.T) { + tests := map[string]string{ + "host.example. IN EUI48 00-00-5e-90-01-2a": "host.example.\t3600\tIN\tEUI48\t00-00-5e-90-01-2a", + "host.example. IN EUI64 00-00-5e-ef-00-00-00-2a": "host.example.\t3600\tIN\tEUI64\t00-00-5e-ef-00-00-00-2a", + } + for i, o := range tests { + r, err := NewRR(i) + if err != nil { + t.Errorf("failed to parse %s: %v", i, err) + } + if r.String() != o { + t.Errorf("want %s, got %s", o, r.String()) + } + } +} + +func TestUserRR(t *testing.T) { + tests := map[string]string{ + "host.example. IN UID 1234": "host.example.\t3600\tIN\tUID\t1234", + "host.example. IN GID 1234556": "host.example.\t3600\tIN\tGID\t1234556", + "host.example. IN UINFO \"Miek Gieben\"": "host.example.\t3600\tIN\tUINFO\t\"Miek Gieben\"", + } + for i, o := range tests { + r, err := NewRR(i) + if err != nil { + t.Errorf("failed to parse %s: %v", i, err) + } + if r.String() != o { + t.Errorf("want %s, got %s", o, r.String()) + } + } +} + +func TestTXT(t *testing.T) { + // Test single entry TXT record + rr, err := NewRR(`_raop._tcp.local. 60 IN TXT "single value"`) + if err != nil { + t.Error("failed to parse single value TXT record", err) + } else if rr, ok := rr.(*TXT); !ok { + t.Error("wrong type, record should be of type TXT") + } else { + if len(rr.Txt) != 1 { + t.Error("bad size of TXT value:", len(rr.Txt)) + } else if rr.Txt[0] != "single value" { + t.Error("bad single value") + } + if rr.String() != `_raop._tcp.local. 60 IN TXT "single value"` { + t.Error("bad representation of TXT record:", rr.String()) + } + if rr.len() != 28+1+12 { + t.Error("bad size of serialized record:", rr.len()) + } + } + + // Test multi entries TXT record + rr, err = NewRR(`_raop._tcp.local. 60 IN TXT "a=1" "b=2" "c=3" "d=4"`) + if err != nil { + t.Error("failed to parse multi-values TXT record", err) + } else if rr, ok := rr.(*TXT); !ok { + t.Error("wrong type, record should be of type TXT") + } else { + if len(rr.Txt) != 4 { + t.Error("bad size of TXT multi-value:", len(rr.Txt)) + } else if rr.Txt[0] != "a=1" || rr.Txt[1] != "b=2" || rr.Txt[2] != "c=3" || rr.Txt[3] != "d=4" { + t.Error("bad values in TXT records") + } + if rr.String() != `_raop._tcp.local. 
60 IN TXT "a=1" "b=2" "c=3" "d=4"` { + t.Error("bad representation of TXT multi value record:", rr.String()) + } + if rr.len() != 28+1+3+1+3+1+3+1+3 { + t.Error("bad size of serialized multi value record:", rr.len()) + } + } + + // Test empty-string in TXT record + rr, err = NewRR(`_raop._tcp.local. 60 IN TXT ""`) + if err != nil { + t.Error("failed to parse empty-string TXT record", err) + } else if rr, ok := rr.(*TXT); !ok { + t.Error("wrong type, record should be of type TXT") + } else { + if len(rr.Txt) != 1 { + t.Error("bad size of TXT empty-string value:", len(rr.Txt)) + } else if rr.Txt[0] != "" { + t.Error("bad value for empty-string TXT record") + } + if rr.String() != `_raop._tcp.local. 60 IN TXT ""` { + t.Error("bad representation of empty-string TXT record:", rr.String()) + } + if rr.len() != 28+1 { + t.Error("bad size of serialized record:", rr.len()) + } + } + + // Test TXT record with chunk larger than 255 bytes, they should be split up, by the parser + s := "" + for i := 0; i < 255; i++ { + s += "a" + } + s += "b" + rr, err = NewRR(`test.local. 60 IN TXT "` + s + `"`) + if err != nil { + t.Error("failed to parse empty-string TXT record", err) + } + if rr.(*TXT).Txt[1] != "b" { + t.Errorf("Txt should have two chunk, last one my be 'b', but is %s", rr.(*TXT).Txt[1]) + } + t.Log(rr.String()) +} + +func TestTypeXXXX(t *testing.T) { + _, err := NewRR("example.com IN TYPE1234 \\# 4 aabbccdd") + if err != nil { + t.Errorf("failed to parse TYPE1234 RR: %v", err) + } + _, err = NewRR("example.com IN TYPE655341 \\# 8 aabbccddaabbccdd") + if err == nil { + t.Errorf("this should not work, for TYPE655341") + } + _, err = NewRR("example.com IN TYPE1 \\# 4 0a000001") + if err == nil { + t.Errorf("this should not work") + } +} + +func TestPTR(t *testing.T) { + _, err := NewRR("144.2.0.192.in-addr.arpa. 900 IN PTR ilouse03146p0\\(.example.com.") + if err != nil { + t.Error("failed to parse ", err) + } +} + +func TestDigit(t *testing.T) { + tests := map[string]byte{ + "miek\\000.nl. 100 IN TXT \"A\"": 0, + "miek\\001.nl. 100 IN TXT \"A\"": 1, + "miek\\254.nl. 100 IN TXT \"A\"": 254, + "miek\\255.nl. 100 IN TXT \"A\"": 255, + "miek\\256.nl. 100 IN TXT \"A\"": 0, + "miek\\257.nl. 100 IN TXT \"A\"": 1, + "miek\\004.nl. 100 IN TXT \"A\"": 4, + } + for s, i := range tests { + r, err := NewRR(s) + buf := make([]byte, 40) + if err != nil { + t.Fatalf("failed to parse %v", err) + } + PackRR(r, buf, 0, nil, false) + t.Log(buf) + if buf[5] != i { + t.Fatalf("5 pos must be %d, is %d", i, buf[5]) + } + r1, _, _ := UnpackRR(buf, 0) + if r1.Header().Ttl != 100 { + t.Fatalf("TTL should %d, is %d", 100, r1.Header().Ttl) + } + } +} + +func TestParseRRSIGTimestamp(t *testing.T) { + tests := map[string]bool{ + `miek.nl. IN RRSIG SOA 8 2 43200 20140210031301 20140111031301 12051 miek.nl. MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, + `miek.nl. IN RRSIG SOA 8 2 43200 315565800 4102477800 12051 miek.nl. 
MVZUyrYwq0iZhMFDDnVXD2BvuNiUJjSYlJAgzyAE6CF875BMvvZa+Sb0 RlSCL7WODQSQHhCx/fegHhVVF+Iz8N8kOLrmXD1+jO3Bm6Prl5UhcsPx WTBsg/kmxbp8sR1kvH4oZJtVfakG3iDerrxNaf0sQwhZzyfJQAqpC7pcBoc=`: true, + } + for r := range tests { + _, err := NewRR(r) + if err != nil { + t.Error(err) + } + } +} + +func TestTxtEqual(t *testing.T) { + rr1 := new(TXT) + rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + rr1.Txt = []string{"a\"a", "\"", "b"} + rr2, _ := NewRR(rr1.String()) + if rr1.String() != rr2.String() { + // This is not an error, but keep this test. + t.Errorf("these two TXT records should match:\n%s\n%s", rr1.String(), rr2.String()) + } + t.Logf("%s\n%s", rr1.String(), rr2.String()) +} + +func TestTxtLong(t *testing.T) { + rr1 := new(TXT) + rr1.Hdr = RR_Header{Name: ".", Rrtype: TypeTXT, Class: ClassINET, Ttl: 0} + // Make a long txt record, this breaks when sending the packet, + // but not earlier. + rr1.Txt = []string{"start-"} + for i := 0; i < 200; i++ { + rr1.Txt[0] += "start-" + } + str := rr1.String() + if len(str) < len(rr1.Txt[0]) { + t.Error("string conversion should work") + } +} + +// Basically, don't crash. +func TestMalformedPackets(t *testing.T) { + var packets = []string{ + "0021641c0000000100000000000078787878787878787878787303636f6d0000100001", + } + + // com = 63 6f 6d + for _, packet := range packets { + data, _ := hex.DecodeString(packet) + // for _, v := range data { + // t.Log(v) + // } + var msg Msg + msg.Unpack(data) + // println(msg.String()) + } +} + +type algorithm struct { + name uint8 + bits int +} + +func TestNewPrivateKey(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + algorithms := []algorithm{ + {ECDSAP256SHA256, 256}, + {ECDSAP384SHA384, 384}, + {RSASHA1, 1024}, + {RSASHA256, 2048}, + {DSA, 1024}, + } + + for _, algo := range algorithms { + key := new(DNSKEY) + key.Hdr.Rrtype = TypeDNSKEY + key.Hdr.Name = "miek.nl." 
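+		// The remaining header fields are boilerplate; the point of the test is
+		// that PrivateKeyString followed by NewPrivateKey reproduces an identical key.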
+ key.Hdr.Class = ClassINET + key.Hdr.Ttl = 14400 + key.Flags = 256 + key.Protocol = 3 + key.Algorithm = algo.name + privkey, err := key.Generate(algo.bits) + if err != nil { + t.Fatal(err) + } + + newPrivKey, err := key.NewPrivateKey(key.PrivateKeyString(privkey)) + if err != nil { + t.Error(key.String()) + t.Error(key.PrivateKeyString(privkey)) + t.Fatal(err) + } + + switch newPrivKey := newPrivKey.(type) { + case *rsa.PrivateKey: + newPrivKey.Precompute() + } + + if !reflect.DeepEqual(privkey, newPrivKey) { + t.Errorf("[%v] Private keys differ:\n%#v\n%#v", AlgorithmToString[algo.name], privkey, newPrivKey) + } + } +} + +// special input test +func TestNewRRSpecial(t *testing.T) { + var ( + rr RR + err error + expect string + ) + + rr, err = NewRR("; comment") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("$ORIGIN foo.") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR(" ") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("\n") + expect = "" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr != nil { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } + + rr, err = NewRR("foo. A 1.1.1.1\nbar. A 2.2.2.2") + expect = "foo.\t3600\tIN\tA\t1.1.1.1" + if err != nil { + t.Errorf("unexpected err: %v", err) + } + if rr == nil || rr.String() != expect { + t.Errorf("unexpected result: [%s] != [%s]", rr, expect) + } +} + +func TestPrintfVerbsRdata(t *testing.T) { + x, _ := NewRR("www.miek.nl. IN MX 20 mx.miek.nl.") + if Field(x, 1) != "20" { + t.Errorf("should be 20") + } + if Field(x, 2) != "mx.miek.nl." { + t.Errorf("should be mx.miek.nl.") + } + + x, _ = NewRR("www.miek.nl. IN A 127.0.0.1") + if Field(x, 1) != "127.0.0.1" { + t.Errorf("should be 127.0.0.1") + } + + x, _ = NewRR("www.miek.nl. IN AAAA ::1") + if Field(x, 1) != "::1" { + t.Errorf("should be ::1") + } + + x, _ = NewRR("www.miek.nl. IN NSEC a.miek.nl. A NS SOA MX AAAA") + if Field(x, 1) != "a.miek.nl." { + t.Errorf("should be a.miek.nl.") + } + if Field(x, 2) != "A NS SOA MX AAAA" { + t.Errorf("should be A NS SOA MX AAAA") + } + + x, _ = NewRR("www.miek.nl. IN TXT \"first\" \"second\"") + if Field(x, 1) != "first second" { + t.Errorf("should be first second") + } + if Field(x, 0) != "" { + t.Errorf("should be empty") + } +} + +func TestParseIPSECKEY(t *testing.T) { + tests := []string{ + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 1 2 192.0.2.38 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "38.2.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 0 2 . AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "38.2.0.192.in-addr.arpa. 
7200 IN IPSECKEY ( 10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.2.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 1 2 192.0.2.3 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "38.1.0.192.in-addr.arpa. 7200 IN IPSECKEY ( 10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "38.1.0.192.in-addr.arpa.\t7200\tIN\tIPSECKEY\t10 3 2 mygateway.example.com. AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + + "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0 7200 IN IPSECKEY ( 10 2 2 2001:0DB8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ== )", + "0.d.4.0.3.0.e.f.f.f.3.f.0.1.2.0.\t7200\tIN\tIPSECKEY\t10 2 2 2001:db8:0:8002::2000:1 AQNRU3mG7TVTO2BkR47usntb102uFJtugbo6BSGvgqt4AQ==", + } + for i := 0; i < len(tests)-1; i++ { + t1 := tests[i] + e1 := tests[i+1] + r, err := NewRR(t1) + if err != nil { + t.Errorf("failed to parse IPSECKEY %v", err) + continue + } + if r.String() != e1 { + t.Errorf("these two IPSECKEY records should match:\n%s\n%s", r.String(), e1) + } + i++ + } +} + +func TestParseTokenOverflow(t *testing.T) { + _, err := NewRR("_443._tcp.example.org. IN TLSA 0 0 0 308205e8308204d0a00302010202100411de8f53b462f6a5a861b712ec6b59300d06092a864886f70d01010b05003070310b300906035504061302555331153013060355040a130c446967694365727420496e6331193017060355040b13107777772e64696769636572742e636f6d312f302d06035504031326446967694365727420534841322048696768204173737572616e636520536572766572204341301e170d3134313130363030303030305a170d3135313131333132303030305a3081a5310b3009060355040613025553311330110603550408130a43616c69666f726e6961311430120603550407130b4c6f7320416e67656c6573313c303a060355040a1333496e7465726e657420436f72706f726174696f6e20666f722041737369676e6564204e616d657320616e64204e756d6265727331133011060355040b130a546563686e6f6c6f6779311830160603550403130f7777772e6578616d706c652e6f726730820122300d06092a864886f70d01010105000382010f003082010a02820101009e663f52a3d18cb67cdfed547408a4e47e4036538988da2798da3b6655f7240d693ed1cb3fe6d6ad3a9e657ff6efa86b83b0cad24e5d31ff2bf70ec3b78b213f1b4bf61bdc669cbbc07d67154128ca92a9b3cbb4213a836fb823ddd4d7cc04918314d25f06086fa9970ba17e357cca9b458c27eb71760ab95e3f9bc898ae89050ae4d09ba2f7e4259d9ff1e072a6971b18355a8b9e53670c3d5dbdbd283f93a764e71b3a4140ca0746090c08510e2e21078d7d07844bf9c03865b531a0bf2ee766bc401f6451c5a1e6f6fb5d5c1d6a97a0abe91ae8b02e89241e07353909ccd5b41c46de207c06801e08f20713603827f2ae3e68cf15ef881d7e0608f70742e30203010001a382024630820242301f0603551d230418301680145168ff90af0207753cccd9656462a212b859723b301d0603551d0e04160414b000a7f422e9b1ce216117c4c46e7164c8e60c553081810603551d11047a3078820f7777772e6578616d706c652e6f7267820b6578616d706c652e636f6d820b6578616d706c652e656475820b6578616d706c652e6e6574820b6578616d706c652e6f7267820f7777772e6578616d706c652e636f6d820f7777772e6578616d706c652e656475820f7777772e6578616d706c652e6e6574300e0603551d0f0101ff0404030205a0301d0603551d250416301406082b0601050507030106082b0601050507030230750603551d1f046e306c3034a032a030862e687474703a2f2f63726c332e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c3034a032a030862e687474703a2f2f63726c342e64696769636572742e636f6d2f736861322d68612d7365727665722d67332e63726c30420603551d20043b3039303706096086480186fd6c0101302a302806082b06010505070201161c68747470733a2f2f7777772e64696769636572742e636f6d2f43505330818306082b0601050507010104773075302406082b060105050730018618687474703a2f2f6f6373702e64696769636572742e636f6d304d06082b060105050730028641687474703a2f2f636163657274732e64696769636572742e636f6d2f44696769436572745348413248
6967684173737572616e636553657276657243412e637274300c0603551d130101ff04023000300d06092a864886f70d01010b050003820101005eac2124dedb3978a86ff3608406acb542d3cb54cb83facd63aec88144d6a1bf15dbf1f215c4a73e241e582365cba9ea50dd306541653b3513af1a0756c1b2720e8d112b34fb67181efad9c4609bdc670fb025fa6e6d42188161b026cf3089a08369c2f3609fc84bcc3479140c1922ede430ca8dbac2b2a3cdacb305ba15dc7361c4c3a5e6daa99cb446cb221b28078a7a944efba70d96f31ac143d959bccd2fd50e30c325ea2624fb6b6dbe9344dbcf133bfbd5b4e892d635dbf31596451672c6b65ba5ac9b3cddea92b35dab1065cae3c8cb6bb450a62ea2f72ea7c6bdc7b65fa09b012392543734083c7687d243f8d0375304d99ccd2e148966a8637a6797") + if err == nil { + t.Fatalf("token overflow should return an error") + } + t.Logf("err: %s\n", err) +} + +func TestParseTLSA(t *testing.T) { + lt := []string{ + "_443._tcp.example.org.\t3600\tIN\tTLSA\t1 1 1 c22be239f483c08957bc106219cc2d3ac1a308dfbbdd0a365f17b9351234cf00", + "_443._tcp.example.org.\t3600\tIN\tTLSA\t2 1 2 4e85f45179e9cd6e0e68e2eb5be2e85ec9b92d91c609caf3ef0315213e3f92ece92c38397a607214de95c7fadc0ad0f1c604a469a0387959745032c0d51492f3", + "_443._tcp.example.org.\t3600\tIN\tTLSA\t3 0 2 69ec8d2277360b215d0cd956b0e2747108dff34b27d461a41c800629e38ee6c2d1230cc9e8e36711330adc6766e6ff7c5fbb37f106f248337c1a20ad682888d2", + } + for _, o := range lt { + rr, err := NewRR(o) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", o, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseSSHFP(t *testing.T) { + lt := []string{ + "test.example.org.\t300\tSSHFP\t1 2 (\n" + + "\t\t\t\t\tBC6533CDC95A79078A39A56EA7635984ED655318ADA9\n" + + "\t\t\t\t\tB6159E30723665DA95BB )", + "test.example.org.\t300\tSSHFP\t1 2 ( BC6533CDC 95A79078A39A56EA7635984ED655318AD A9B6159E3072366 5DA95BB )", + } + result := "test.example.org.\t300\tIN\tSSHFP\t1 2 BC6533CDC95A79078A39A56EA7635984ED655318ADA9B6159E30723665DA95BB" + for _, o := range lt { + rr, err := NewRR(o) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != result { + t.Errorf("`%s' should be equal to\n\n`%s', but is \n`%s'", o, result, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseHINFO(t *testing.T) { + dt := map[string]string{ + "example.net. HINFO A B": "example.net. 3600 IN HINFO \"A\" \"B\"", + "example.net. HINFO \"A\" \"B\"": "example.net. 3600 IN HINFO \"A\" \"B\"", + "example.net. HINFO A B C D E F": "example.net. 3600 IN HINFO \"A\" \"B C D E F\"", + "example.net. HINFO AB": "example.net. 3600 IN HINFO \"AB\" \"\"", + // "example.net. HINFO PC-Intel-700mhz \"Redhat Linux 7.1\"": "example.net. 3600 IN HINFO \"PC-Intel-700mhz\" \"Redhat Linux 7.1\"", + // This one is recommended in Pro Bind book http://www.zytrax.com/books/dns/ch8/hinfo.html + // but effectively, even Bind would replace it to correctly formed text when you AXFR + // TODO: remove this set of comments or figure support for quoted/unquoted combinations in endingToTxtSlice function + } + for i, o := range dt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestParseCAA(t *testing.T) { + lt := map[string]string{ + "example.net. 
CAA 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", + "example.net. CAA 0 issuewild \"symantec.com; stuff\"": "example.net.\t3600\tIN\tCAA\t0 issuewild \"symantec.com; stuff\"", + "example.net. CAA 128 tbs \"critical\"": "example.net.\t3600\tIN\tCAA\t128 tbs \"critical\"", + "example.net. CAA 2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"": "example.net.\t3600\tIN\tCAA\t2 auth \"0>09\\006\\010+\\006\\001\\004\\001\\214y\\002\\003\\001\\006\\009`\\134H\\001e\\003\\004\\002\\001\\004 y\\209\\012\\221r\\220\\156Q\\218\\150\\150{\\166\\245:\\231\\182%\\157:\\133\\179}\\1923r\\238\\151\\255\\128q\\145\\002\\001\\000\"", + "example.net. TYPE257 0 issue \"symantec.com\"": "example.net.\t3600\tIN\tCAA\t0 issue \"symantec.com\"", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} + +func TestPackCAA(t *testing.T) { + m := new(Msg) + record := new(CAA) + record.Hdr = RR_Header{Name: "example.com.", Rrtype: TypeCAA, Class: ClassINET, Ttl: 0} + record.Tag = "issue" + record.Value = "symantec.com" + record.Flag = 1 + + m.Answer = append(m.Answer, record) + bytes, err := m.Pack() + if err != nil { + t.Fatalf("failed to pack msg: %v", err) + } + if err := m.Unpack(bytes); err != nil { + t.Fatalf("failed to unpack msg: %v", err) + } + if len(m.Answer) != 1 { + t.Fatalf("incorrect number of answers unpacked") + } + rr := m.Answer[0].(*CAA) + if rr.Tag != "issue" { + t.Fatalf("invalid tag for unpacked answer") + } else if rr.Value != "symantec.com" { + t.Fatalf("invalid value for unpacked answer") + } else if rr.Flag != 1 { + t.Fatalf("invalid flag for unpacked answer") + } +} + +func TestParseURI(t *testing.T) { + lt := map[string]string{ + "_http._tcp. IN URI 10 1 \"http://www.example.com/path\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"http://www.example.com/path\"", + "_http._tcp. 
IN URI 10 1 \"\"": "_http._tcp.\t3600\tIN\tURI\t10 1 \"\"", + } + for i, o := range lt { + rr, err := NewRR(i) + if err != nil { + t.Error("failed to parse RR: ", err) + continue + } + if rr.String() != o { + t.Errorf("`%s' should be equal to\n`%s', but is `%s'", i, o, rr.String()) + } else { + t.Logf("RR is OK: `%s'", rr.String()) + } + } +} diff --git a/vendor/github.com/miekg/dns/privaterr_test.go b/vendor/github.com/miekg/dns/privaterr_test.go new file mode 100644 index 0000000000..1d99dc93e4 --- /dev/null +++ b/vendor/github.com/miekg/dns/privaterr_test.go @@ -0,0 +1,170 @@ +package dns_test + +import ( + "strings" + "testing" + + "github.com/miekg/dns" +) + +const TypeISBN uint16 = 0x0F01 + +// A crazy new RR type :) +type ISBN struct { + x string // rdata with 10 or 13 numbers, dashes or spaces allowed +} + +func NewISBN() dns.PrivateRdata { return &ISBN{""} } + +func (rd *ISBN) Len() int { return len([]byte(rd.x)) } +func (rd *ISBN) String() string { return rd.x } + +func (rd *ISBN) Parse(txt []string) error { + rd.x = strings.TrimSpace(strings.Join(txt, " ")) + return nil +} + +func (rd *ISBN) Pack(buf []byte) (int, error) { + b := []byte(rd.x) + n := copy(buf, b) + if n != len(b) { + return n, dns.ErrBuf + } + return n, nil +} + +func (rd *ISBN) Unpack(buf []byte) (int, error) { + rd.x = string(buf) + return len(buf), nil +} + +func (rd *ISBN) Copy(dest dns.PrivateRdata) error { + isbn, ok := dest.(*ISBN) + if !ok { + return dns.ErrRdata + } + isbn.x = rd.x + return nil +} + +var testrecord = strings.Join([]string{"example.org.", "3600", "IN", "ISBN", "12-3 456789-0-123"}, "\t") + +func TestPrivateText(t *testing.T) { + dns.PrivateHandle("ISBN", TypeISBN, NewISBN) + defer dns.PrivateHandleRemove(TypeISBN) + + rr, err := dns.NewRR(testrecord) + if err != nil { + t.Fatal(err) + } + if rr.String() != testrecord { + t.Errorf("record string representation did not match original %#v != %#v", rr.String(), testrecord) + } else { + t.Log(rr.String()) + } +} + +func TestPrivateByteSlice(t *testing.T) { + dns.PrivateHandle("ISBN", TypeISBN, NewISBN) + defer dns.PrivateHandleRemove(TypeISBN) + + rr, err := dns.NewRR(testrecord) + if err != nil { + t.Fatal(err) + } + + buf := make([]byte, 100) + off, err := dns.PackRR(rr, buf, 0, nil, false) + if err != nil { + t.Errorf("got error packing ISBN: %v", err) + } + + custrr := rr.(*dns.PrivateRR) + if ln := custrr.Data.Len() + len(custrr.Header().Name) + 11; ln != off { + t.Errorf("offset is not matching to length of Private RR: %d!=%d", off, ln) + } + + rr1, off1, err := dns.UnpackRR(buf[:off], 0) + if err != nil { + t.Errorf("got error unpacking ISBN: %v", err) + } + + if off1 != off { + t.Errorf("offset after unpacking differs: %d != %d", off1, off) + } + + if rr1.String() != testrecord { + t.Errorf("record string representation did not match original %#v != %#v", rr1.String(), testrecord) + } else { + t.Log(rr1.String()) + } +} + +const TypeVERSION uint16 = 0x0F02 + +type VERSION struct { + x string +} + +func NewVersion() dns.PrivateRdata { return &VERSION{""} } + +func (rd *VERSION) String() string { return rd.x } +func (rd *VERSION) Parse(txt []string) error { + rd.x = strings.TrimSpace(strings.Join(txt, " ")) + return nil +} + +func (rd *VERSION) Pack(buf []byte) (int, error) { + b := []byte(rd.x) + n := copy(buf, b) + if n != len(b) { + return n, dns.ErrBuf + } + return n, nil +} + +func (rd *VERSION) Unpack(buf []byte) (int, error) { + rd.x = string(buf) + return len(buf), nil +} + +func (rd *VERSION) Copy(dest dns.PrivateRdata) error { 
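+ // Same pattern as ISBN.Copy above: assert the destination's concrete type before copying the rdata.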
+ isbn, ok := dest.(*VERSION) + if !ok { + return dns.ErrRdata + } + isbn.x = rd.x + return nil +} + +func (rd *VERSION) Len() int { + return len([]byte(rd.x)) +} + +var smallzone = `$ORIGIN example.org. +@ SOA sns.dns.icann.org. noc.dns.icann.org. ( + 2014091518 7200 3600 1209600 3600 +) + A 1.2.3.4 +ok ISBN 1231-92110-12 +go VERSION ( + 1.3.1 ; comment +) +www ISBN 1231-92110-16 +* CNAME @ +` + +func TestPrivateZoneParser(t *testing.T) { + dns.PrivateHandle("ISBN", TypeISBN, NewISBN) + dns.PrivateHandle("VERSION", TypeVERSION, NewVersion) + defer dns.PrivateHandleRemove(TypeISBN) + defer dns.PrivateHandleRemove(TypeVERSION) + + r := strings.NewReader(smallzone) + for x := range dns.ParseZone(r, ".", "") { + if err := x.Error; err != nil { + t.Fatal(err) + } + t.Log(x.RR) + } +} diff --git a/vendor/github.com/miekg/dns/remote_test.go b/vendor/github.com/miekg/dns/remote_test.go new file mode 100644 index 0000000000..4cf701fe43 --- /dev/null +++ b/vendor/github.com/miekg/dns/remote_test.go @@ -0,0 +1,19 @@ +package dns + +import "testing" + +const LinodeAddr = "176.58.119.54:53" + +func TestClientRemote(t *testing.T) { + m := new(Msg) + m.SetQuestion("go.dns.miek.nl.", TypeTXT) + + c := new(Client) + r, _, err := c.Exchange(m, LinodeAddr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + if r != nil && r.Rcode != RcodeSuccess { + t.Errorf("failed to get an valid answer\n%v", r) + } +} diff --git a/vendor/github.com/miekg/dns/sanitize_test.go b/vendor/github.com/miekg/dns/sanitize_test.go new file mode 100644 index 0000000000..22d8e87987 --- /dev/null +++ b/vendor/github.com/miekg/dns/sanitize_test.go @@ -0,0 +1,85 @@ +package dns + +import "testing" + +func TestDedup(t *testing.T) { + // make it []string + testcases := map[[3]RR][]string{ + [...]RR{ + newRR(t, "mIek.nl. IN A 127.0.0.1"), + newRR(t, "mieK.nl. IN A 127.0.0.1"), + newRR(t, "miek.Nl. IN A 127.0.0.1"), + }: {"mIek.nl.\t3600\tIN\tA\t127.0.0.1"}, + [...]RR{ + newRR(t, "miEk.nl. 2000 IN A 127.0.0.1"), + newRR(t, "mieK.Nl. 1000 IN A 127.0.0.1"), + newRR(t, "Miek.nL. 500 IN A 127.0.0.1"), + }: {"miEk.nl.\t500\tIN\tA\t127.0.0.1"}, + [...]RR{ + newRR(t, "miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.nl. CH A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), + }: {"miek.nl.\t3600\tIN\tA\t127.0.0.1", + "miek.nl.\t3600\tCH\tA\t127.0.0.1", + }, + [...]RR{ + newRR(t, "miek.nl. CH A 127.0.0.1"), + newRR(t, "miek.nl. IN A 127.0.0.1"), + newRR(t, "miek.de. IN A 127.0.0.1"), + }: {"miek.nl.\t3600\tCH\tA\t127.0.0.1", + "miek.nl.\t3600\tIN\tA\t127.0.0.1", + "miek.de.\t3600\tIN\tA\t127.0.0.1", + }, + [...]RR{ + newRR(t, "miek.de. IN A 127.0.0.1"), + newRR(t, "miek.nl. 200 IN A 127.0.0.1"), + newRR(t, "miek.nl. 300 IN A 127.0.0.1"), + }: {"miek.de.\t3600\tIN\tA\t127.0.0.1", + "miek.nl.\t200\tIN\tA\t127.0.0.1", + }, + } + + for rr, expected := range testcases { + out := Dedup([]RR{rr[0], rr[1], rr[2]}, nil) + for i, o := range out { + if o.String() != expected[i] { + t.Fatalf("expected %v, got %v", expected[i], o.String()) + } + } + } +} + +func BenchmarkDedup(b *testing.B) { + rrs := []RR{ + newRR(nil, "miEk.nl. 2000 IN A 127.0.0.1"), + newRR(nil, "mieK.Nl. 1000 IN A 127.0.0.1"), + newRR(nil, "Miek.nL. 500 IN A 127.0.0.1"), + } + m := make(map[string]RR) + for i := 0; i < b.N; i++ { + Dedup(rrs, m) + } +} + +func TestNormalizedString(t *testing.T) { + tests := map[RR]string{ + newRR(t, "mIEk.Nl. 3600 IN A 127.0.0.1"): "miek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "m\\ iek.nL. 
3600 IN A 127.0.0.1"): "m\\ iek.nl.\tIN\tA\t127.0.0.1", + newRR(t, "m\\\tIeK.nl. 3600 in A 127.0.0.1"): "m\\tiek.nl.\tIN\tA\t127.0.0.1", + } + for tc, expected := range tests { + n := normalizedString(tc) + if n != expected { + t.Logf("expected %s, got %s", expected, n) + t.Fail() + } + } +} + +func newRR(t *testing.T, s string) RR { + r, e := NewRR(s) + if e != nil { + t.Logf("newRR: %s", e) + } + return r +} diff --git a/vendor/github.com/miekg/dns/server_test.go b/vendor/github.com/miekg/dns/server_test.go new file mode 100644 index 0000000000..1b5cbc97ee --- /dev/null +++ b/vendor/github.com/miekg/dns/server_test.go @@ -0,0 +1,679 @@ +package dns + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "runtime" + "sync" + "testing" + "time" +) + +func HelloServer(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + w.WriteMsg(m) +} + +func HelloServerBadId(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + m.Id++ + + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + w.WriteMsg(m) +} + +func AnotherHelloServer(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello example"}} + w.WriteMsg(m) +} + +func RunLocalUDPServer(laddr string) (*Server, string, error) { + server, l, _, err := RunLocalUDPServerWithFinChan(laddr) + + return server, l, err +} + +func RunLocalUDPServerWithFinChan(laddr string) (*Server, string, chan struct{}, error) { + pc, err := net.ListenPacket("udp", laddr) + if err != nil { + return nil, "", nil, err + } + server := &Server{PacketConn: pc, ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + fin := make(chan struct{}, 0) + + go func() { + server.ActivateAndServe() + close(fin) + pc.Close() + }() + + waitLock.Lock() + return server, pc.LocalAddr().String(), fin, nil +} + +func RunLocalUDPServerUnsafe(laddr string) (*Server, string, error) { + pc, err := net.ListenPacket("udp", laddr) + if err != nil { + return nil, "", err + } + server := &Server{PacketConn: pc, Unsafe: true, + ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + go func() { + server.ActivateAndServe() + pc.Close() + }() + + waitLock.Lock() + return server, pc.LocalAddr().String(), nil +} + +func RunLocalTCPServer(laddr string) (*Server, string, error) { + l, err := net.Listen("tcp", laddr) + if err != nil { + return nil, "", err + } + + server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + + go func() { + server.ActivateAndServe() + l.Close() + }() + + waitLock.Lock() + return server, l.Addr().String(), nil +} + +func RunLocalTLSServer(laddr string, config *tls.Config) (*Server, string, error) { + l, err := tls.Listen("tcp", laddr, config) + if err != nil { + return nil, "", err + } + + server := &Server{Listener: l, ReadTimeout: time.Hour, WriteTimeout: time.Hour} + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = 
waitLock.Unlock + + go func() { + server.ActivateAndServe() + l.Close() + }() + + waitLock.Lock() + return server, l.Addr().String(), nil +} + +func TestServing(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + HandleFunc("example.com.", AnotherHelloServer) + defer HandleRemove("miek.nl.") + defer HandleRemove("example.com.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + r, _, err := c.Exchange(m, addrstr) + if err != nil || len(r.Extra) == 0 { + t.Fatal("failed to exchange miek.nl", err) + } + txt := r.Extra[0].(*TXT).Txt[0] + if txt != "Hello world" { + t.Error("unexpected result for miek.nl", txt, "!= Hello world") + } + + m.SetQuestion("example.com.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("failed to exchange example.com", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } + + // Test Mixes cased as noticed by Ask. + m.SetQuestion("eXaMplE.cOm.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Error("failed to exchange eXaMplE.cOm", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } +} + +func TestServingTLS(t *testing.T) { + HandleFunc("miek.nl.", HelloServer) + HandleFunc("example.com.", AnotherHelloServer) + defer HandleRemove("miek.nl.") + defer HandleRemove("example.com.") + + cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) + if err != nil { + t.Fatalf("unable to build certificate: %v", err) + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + s, addrstr, err := RunLocalTLSServer("127.0.0.1:0", &config) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + c.Net = "tcp-tls" + c.TLSConfig = &tls.Config{ + InsecureSkipVerify: true, + } + + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + r, _, err := c.Exchange(m, addrstr) + if err != nil || len(r.Extra) == 0 { + t.Fatal("failed to exchange miek.nl", err) + } + txt := r.Extra[0].(*TXT).Txt[0] + if txt != "Hello world" { + t.Error("unexpected result for miek.nl", txt, "!= Hello world") + } + + m.SetQuestion("example.com.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("failed to exchange example.com", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } + + // Test Mixes cased as noticed by Ask. 
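+ // DNS name matching is case-insensitive, so the mixed-case query should still reach AnotherHelloServer.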
+ m.SetQuestion("eXaMplE.cOm.", TypeTXT) + r, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Error("failed to exchange eXaMplE.cOm", err) + } + txt = r.Extra[0].(*TXT).Txt[0] + if txt != "Hello example" { + t.Error("unexpected result for example.com", txt, "!= Hello example") + } +} + +func BenchmarkServe(b *testing.B) { + b.StopTimer() + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + a := runtime.GOMAXPROCS(4) + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + b.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl", TypeSOA) + + b.StartTimer() + for i := 0; i < b.N; i++ { + c.Exchange(m, addrstr) + } + runtime.GOMAXPROCS(a) +} + +func benchmarkServe6(b *testing.B) { + b.StopTimer() + HandleFunc("miek.nl.", HelloServer) + defer HandleRemove("miek.nl.") + a := runtime.GOMAXPROCS(4) + s, addrstr, err := RunLocalUDPServer("[::1]:0") + if err != nil { + b.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl", TypeSOA) + + b.StartTimer() + for i := 0; i < b.N; i++ { + c.Exchange(m, addrstr) + } + runtime.GOMAXPROCS(a) +} + +func HelloServerCompress(w ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + m.Extra = make([]RR, 1) + m.Extra[0] = &TXT{Hdr: RR_Header{Name: m.Question[0].Name, Rrtype: TypeTXT, Class: ClassINET, Ttl: 0}, Txt: []string{"Hello world"}} + m.Compress = true + w.WriteMsg(m) +} + +func BenchmarkServeCompress(b *testing.B) { + b.StopTimer() + HandleFunc("miek.nl.", HelloServerCompress) + defer HandleRemove("miek.nl.") + a := runtime.GOMAXPROCS(4) + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + b.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl", TypeSOA) + b.StartTimer() + for i := 0; i < b.N; i++ { + c.Exchange(m, addrstr) + } + runtime.GOMAXPROCS(a) +} + +func TestDotAsCatchAllWildcard(t *testing.T) { + mux := NewServeMux() + mux.Handle(".", HandlerFunc(HelloServer)) + mux.Handle("example.com.", HandlerFunc(AnotherHelloServer)) + + handler := mux.match("www.miek.nl.", TypeTXT) + if handler == nil { + t.Error("wildcard match failed") + } + + handler = mux.match("www.example.com.", TypeTXT) + if handler == nil { + t.Error("example.com match failed") + } + + handler = mux.match("a.www.example.com.", TypeTXT) + if handler == nil { + t.Error("a.www.example.com match failed") + } + + handler = mux.match("boe.", TypeTXT) + if handler == nil { + t.Error("boe. 
match failed") + } +} + +func TestCaseFolding(t *testing.T) { + mux := NewServeMux() + mux.Handle("_udp.example.com.", HandlerFunc(HelloServer)) + + handler := mux.match("_dns._udp.example.com.", TypeSRV) + if handler == nil { + t.Error("case sensitive characters folded") + } + + handler = mux.match("_DNS._UDP.EXAMPLE.COM.", TypeSRV) + if handler == nil { + t.Error("case insensitive characters not folded") + } +} + +func TestRootServer(t *testing.T) { + mux := NewServeMux() + mux.Handle(".", HandlerFunc(HelloServer)) + + handler := mux.match(".", TypeNS) + if handler == nil { + t.Error("root match failed") + } +} + +type maxRec struct { + max int + sync.RWMutex +} + +var M = new(maxRec) + +func HelloServerLargeResponse(resp ResponseWriter, req *Msg) { + m := new(Msg) + m.SetReply(req) + m.Authoritative = true + m1 := 0 + M.RLock() + m1 = M.max + M.RUnlock() + for i := 0; i < m1; i++ { + aRec := &A{ + Hdr: RR_Header{ + Name: req.Question[0].Name, + Rrtype: TypeA, + Class: ClassINET, + Ttl: 0, + }, + A: net.ParseIP(fmt.Sprintf("127.0.0.%d", i+1)).To4(), + } + m.Answer = append(m.Answer, aRec) + } + resp.WriteMsg(m) +} + +func TestServingLargeResponses(t *testing.T) { + HandleFunc("example.", HelloServerLargeResponse) + defer HandleRemove("example.") + + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + // Create request + m := new(Msg) + m.SetQuestion("web.service.example.", TypeANY) + + c := new(Client) + c.Net = "udp" + M.Lock() + M.max = 2 + M.Unlock() + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } + // This must fail + M.Lock() + M.max = 20 + M.Unlock() + _, _, err = c.Exchange(m, addrstr) + if err == nil { + t.Error("failed to fail exchange, this should generate packet error") + } + // But this must work again + c.UDPSize = 7000 + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Errorf("failed to exchange: %v", err) + } +} + +func TestServingResponse(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + HandleFunc("miek.nl.", HelloServer) + s, addrstr, err := RunLocalUDPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + m.Response = false + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("failed to exchange", err) + } + m.Response = true + _, _, err = c.Exchange(m, addrstr) + if err == nil { + t.Fatal("exchanged response message") + } + + s.Shutdown() + s, addrstr, err = RunLocalUDPServerUnsafe("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + defer s.Shutdown() + + m.Response = true + _, _, err = c.Exchange(m, addrstr) + if err != nil { + t.Fatal("could exchanged response message in Unsafe mode") + } +} + +func TestShutdownTCP(t *testing.T) { + s, _, err := RunLocalTCPServer("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + err = s.Shutdown() + if err != nil { + t.Errorf("could not shutdown test TCP server, %v", err) + } +} + +func TestShutdownTLS(t *testing.T) { + cert, err := tls.X509KeyPair(CertPEMBlock, KeyPEMBlock) + if err != nil { + t.Fatalf("unable to build certificate: %v", err) + } + + config := tls.Config{ + Certificates: []tls.Certificate{cert}, + } + + s, _, err := RunLocalTLSServer("127.0.0.1:0", &config) + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + 
err = s.Shutdown() + if err != nil { + t.Errorf("could not shutdown test TLS server, %v", err) + } +} + +type trigger struct { + done bool + sync.RWMutex +} + +func (t *trigger) Set() { + t.Lock() + defer t.Unlock() + t.done = true +} +func (t *trigger) Get() bool { + t.RLock() + defer t.RUnlock() + return t.done +} + +func TestHandlerCloseTCP(t *testing.T) { + + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + panic(err) + } + addr := ln.Addr().String() + + server := &Server{Addr: addr, Net: "tcp", Listener: ln} + + hname := "testhandlerclosetcp." + triggered := &trigger{} + HandleFunc(hname, func(w ResponseWriter, r *Msg) { + triggered.Set() + w.Close() + }) + defer HandleRemove(hname) + + go func() { + defer server.Shutdown() + c := &Client{Net: "tcp"} + m := new(Msg).SetQuestion(hname, 1) + tries := 0 + exchange: + _, _, err := c.Exchange(m, addr) + if err != nil && err != io.EOF { + t.Logf("exchange failed: %s\n", err) + if tries == 3 { + return + } + time.Sleep(time.Second / 10) + tries += 1 + goto exchange + } + }() + server.ActivateAndServe() + if !triggered.Get() { + t.Fatalf("handler never called") + } +} + +func TestShutdownUDP(t *testing.T) { + s, _, fin, err := RunLocalUDPServerWithFinChan("127.0.0.1:0") + if err != nil { + t.Fatalf("unable to run test server: %v", err) + } + err = s.Shutdown() + if err != nil { + t.Errorf("could not shutdown test UDP server, %v", err) + } + select { + case <-fin: + case <-time.After(2 * time.Second): + t.Error("Could not shutdown test UDP server. Gave up waiting") + } +} + +type ExampleFrameLengthWriter struct { + Writer +} + +func (e *ExampleFrameLengthWriter) Write(m []byte) (int, error) { + fmt.Println("writing raw DNS message of length", len(m)) + return e.Writer.Write(m) +} + +func ExampleDecorateWriter() { + // instrument raw DNS message writing + wf := DecorateWriter(func(w Writer) Writer { + return &ExampleFrameLengthWriter{w} + }) + + // simple UDP server + pc, err := net.ListenPacket("udp", "127.0.0.1:0") + if err != nil { + fmt.Println(err.Error()) + return + } + server := &Server{ + PacketConn: pc, + DecorateWriter: wf, + ReadTimeout: time.Hour, WriteTimeout: time.Hour, + } + + waitLock := sync.Mutex{} + waitLock.Lock() + server.NotifyStartedFunc = waitLock.Unlock + defer server.Shutdown() + + go func() { + server.ActivateAndServe() + pc.Close() + }() + + waitLock.Lock() + + HandleFunc("miek.nl.", HelloServer) + + c := new(Client) + m := new(Msg) + m.SetQuestion("miek.nl.", TypeTXT) + _, _, err = c.Exchange(m, pc.LocalAddr().String()) + if err != nil { + fmt.Println("failed to exchange", err.Error()) + return + } + // Output: writing raw DNS message of length 56 +} + +var ( + // CertPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) + CertPEMBlock = []byte(`-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIRAJFYMkcn+b8dpU15wjf++GgwDQYJKoZIhvcNAQELBQAw +EjEQMA4GA1UEChMHQWNtZSBDbzAeFw0xNjAxMDgxMjAzNTNaFw0xNzAxMDcxMjAz +NTNaMBIxEDAOBgNVBAoTB0FjbWUgQ28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDXjqO6skvP03k58CNjQggd9G/mt+Wa+xRU+WXiKCCHttawM8x+slq5 +yfsHCwxlwsGn79HmJqecNqgHb2GWBXAvVVokFDTcC1hUP4+gp2gu9Ny27UHTjlLm +O0l/xZ5MN8tfKyYlFw18tXu3fkaPyHj8v/D1RDkuo4ARdFvGSe8TqisbhLk2+9ow +xfIGbEM9Fdiw8qByC2+d+FfvzIKz3GfQVwn0VoRom8L6NBIANq1IGrB5JefZB6nv +DnfuxkBmY7F1513HKuEJ8KsLWWZWV9OPU4j4I4Rt+WJNlKjbD2srHxyrS2RDsr91 +8nCkNoWVNO3sZq0XkWKecdc921vL4ginAgMBAAGjVDBSMA4GA1UdDwEB/wQEAwIC +pDATBgNVHSUEDDAKBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MBoGA1UdEQQT 
+MBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAGcU3iyLBIVZj +aDzSvEDHUd1bnLBl1C58Xu/CyKlPqVU7mLfK0JcgEaYQTSX6fCJVNLbbCrcGLsPJ +fbjlBbyeLjTV413fxPVuona62pBFjqdtbli2Qe8FRH2KBdm41JUJGdo+SdsFu7nc +BFOcubdw6LLIXvsTvwndKcHWx1rMX709QU1Vn1GAIsbJV/DWI231Jyyb+lxAUx/C +8vce5uVxiKcGS+g6OjsN3D3TtiEQGSXLh013W6Wsih8td8yMCMZ3w8LQ38br1GUe +ahLIgUJ9l6HDguM17R7kGqxNvbElsMUHfTtXXP7UDQUiYXDakg8xDP6n9DCDhJ8Y +bSt7OLB7NQ== +-----END CERTIFICATE-----`) + + // KeyPEMBlock is a X509 data used to test TLS servers (used with tls.X509KeyPair) + KeyPEMBlock = []byte(`-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA146jurJLz9N5OfAjY0IIHfRv5rflmvsUVPll4iggh7bWsDPM +frJaucn7BwsMZcLBp+/R5iannDaoB29hlgVwL1VaJBQ03AtYVD+PoKdoLvTctu1B +045S5jtJf8WeTDfLXysmJRcNfLV7t35Gj8h4/L/w9UQ5LqOAEXRbxknvE6orG4S5 +NvvaMMXyBmxDPRXYsPKgcgtvnfhX78yCs9xn0FcJ9FaEaJvC+jQSADatSBqweSXn +2Qep7w537sZAZmOxdeddxyrhCfCrC1lmVlfTj1OI+COEbfliTZSo2w9rKx8cq0tk +Q7K/dfJwpDaFlTTt7GatF5FinnHXPdtby+IIpwIDAQABAoIBAAJK4RDmPooqTJrC +JA41MJLo+5uvjwCT9QZmVKAQHzByUFw1YNJkITTiognUI0CdzqNzmH7jIFs39ZeG +proKusO2G6xQjrNcZ4cV2fgyb5g4QHStl0qhs94A+WojduiGm2IaumAgm6Mc5wDv +ld6HmknN3Mku/ZCyanVFEIjOVn2WB7ZQLTBs6ZYaebTJG2Xv6p9t2YJW7pPQ9Xce +s9ohAWohyM4X/OvfnfnLtQp2YLw/BxwehBsCR5SXM3ibTKpFNtxJC8hIfTuWtxZu +2ywrmXShYBRB1WgtZt5k04bY/HFncvvcHK3YfI1+w4URKtwdaQgPUQRbVwDwuyBn +flfkCJECgYEA/eWt01iEyE/lXkGn6V9lCocUU7lCU6yk5UT8VXVUc5If4KZKPfCk +p4zJDOqwn2eM673aWz/mG9mtvAvmnugaGjcaVCyXOp/D/GDmKSoYcvW5B/yjfkLy +dK6Yaa5LDRVYlYgyzcdCT5/9Qc626NzFwKCZNI4ncIU8g7ViATRxWJ8CgYEA2Ver +vZ0M606sfgC0H3NtwNBxmuJ+lIF5LNp/wDi07lDfxRR1rnZMX5dnxjcpDr/zvm8J +WtJJX3xMgqjtHuWKL3yKKony9J5ZPjichSbSbhrzfovgYIRZLxLLDy4MP9L3+CX/ +yBXnqMWuSnFX+M5fVGxdDWiYF3V+wmeOv9JvavkCgYEAiXAPDFzaY+R78O3xiu7M +r0o3wqqCMPE/wav6O/hrYrQy9VSO08C0IM6g9pEEUwWmzuXSkZqhYWoQFb8Lc/GI +T7CMXAxXQLDDUpbRgG79FR3Wr3AewHZU8LyiXHKwxcBMV4WGmsXGK3wbh8fyU1NO +6NsGk+BvkQVOoK1LBAPzZ1kCgYEAsBSmD8U33T9s4dxiEYTrqyV0lH3g/SFz8ZHH +pAyNEPI2iC1ONhyjPWKlcWHpAokiyOqeUpVBWnmSZtzC1qAydsxYB6ShT+sl9BHb +RMix/QAauzBJhQhUVJ3OIys0Q1UBDmqCsjCE8SfOT4NKOUnA093C+YT+iyrmmktZ +zDCJkckCgYEAndqM5KXGk5xYo+MAA1paZcbTUXwaWwjLU+XSRSSoyBEi5xMtfvUb +7+a1OMhLwWbuz+pl64wFKrbSUyimMOYQpjVE/1vk/kb99pxbgol27hdKyTH1d+ov +kFsxKCqxAnBVGEWAvVZAiiTOxleQFjz5RnL0BQp9Lg2cQe+dvuUmIAA= +-----END RSA PRIVATE KEY-----`) +) diff --git a/vendor/github.com/miekg/dns/sig0_test.go b/vendor/github.com/miekg/dns/sig0_test.go new file mode 100644 index 0000000000..122de6a8ec --- /dev/null +++ b/vendor/github.com/miekg/dns/sig0_test.go @@ -0,0 +1,89 @@ +package dns + +import ( + "crypto" + "testing" + "time" +) + +func TestSIG0(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + m := new(Msg) + m.SetQuestion("example.org.", TypeSOA) + for _, alg := range []uint8{ECDSAP256SHA256, ECDSAP384SHA384, RSASHA1, RSASHA256, RSASHA512} { + algstr := AlgorithmToString[alg] + keyrr := new(KEY) + keyrr.Hdr.Name = algstr + "." + keyrr.Hdr.Rrtype = TypeKEY + keyrr.Hdr.Class = ClassINET + keyrr.Algorithm = alg + keysize := 1024 + switch alg { + case ECDSAP256SHA256: + keysize = 256 + case ECDSAP384SHA384: + keysize = 384 + } + pk, err := keyrr.Generate(keysize) + if err != nil { + t.Errorf("failed to generate key for “%s”: %v", algstr, err) + continue + } + now := uint32(time.Now().Unix()) + sigrr := new(SIG) + sigrr.Hdr.Name = "." 
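+ // SIG(0) (RFC 2931): a SIG RR with the root owner name and class ANY, appended to the signed message.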
+ sigrr.Hdr.Rrtype = TypeSIG + sigrr.Hdr.Class = ClassANY + sigrr.Algorithm = alg + sigrr.Expiration = now + 300 + sigrr.Inception = now - 300 + sigrr.KeyTag = keyrr.KeyTag() + sigrr.SignerName = keyrr.Hdr.Name + mb, err := sigrr.Sign(pk.(crypto.Signer), m) + if err != nil { + t.Errorf("failed to sign message using “%s”: %v", algstr, err) + continue + } + m := new(Msg) + if err := m.Unpack(mb); err != nil { + t.Errorf("failed to unpack message signed using “%s”: %v", algstr, err) + continue + } + if len(m.Extra) != 1 { + t.Errorf("missing SIG for message signed using “%s”", algstr) + continue + } + var sigrrwire *SIG + switch rr := m.Extra[0].(type) { + case *SIG: + sigrrwire = rr + default: + t.Errorf("expected SIG RR, instead: %v", rr) + continue + } + for _, rr := range []*SIG{sigrr, sigrrwire} { + id := "sigrr" + if rr == sigrrwire { + id = "sigrrwire" + } + if err := rr.Verify(keyrr, mb); err != nil { + t.Errorf("failed to verify “%s” signed SIG(%s): %v", algstr, id, err) + continue + } + } + mb[13]++ + if err := sigrr.Verify(keyrr, mb); err == nil { + t.Errorf("verify succeeded on an altered message using “%s”", algstr) + continue + } + sigrr.Expiration = 2 + sigrr.Inception = 1 + mb, _ = sigrr.Sign(pk.(crypto.Signer), m) + if err := sigrr.Verify(keyrr, mb); err == nil { + t.Errorf("verify succeeded on an expired message using “%s”", algstr) + continue + } + } +} diff --git a/vendor/github.com/miekg/dns/tsig_test.go b/vendor/github.com/miekg/dns/tsig_test.go new file mode 100644 index 0000000000..48b9988b66 --- /dev/null +++ b/vendor/github.com/miekg/dns/tsig_test.go @@ -0,0 +1,37 @@ +package dns + +import ( + "testing" + "time" +) + +func newTsig(algo string) *Msg { + m := new(Msg) + m.SetQuestion("example.org.", TypeA) + m.SetTsig("example.", algo, 300, time.Now().Unix()) + return m +} + +func TestTsig(t *testing.T) { + m := newTsig(HmacMD5) + buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } + err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } +} + +func TestTsigCase(t *testing.T) { + m := newTsig("HmAc-mD5.sig-ALg.rEg.int.") // HmacMD5 + buf, _, err := TsigGenerate(m, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } + err = TsigVerify(buf, "pRZgBrBvI4NAHZYhxmhs/Q==", "", false) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/miekg/dns/types_test.go b/vendor/github.com/miekg/dns/types_test.go new file mode 100644 index 0000000000..118612946b --- /dev/null +++ b/vendor/github.com/miekg/dns/types_test.go @@ -0,0 +1,42 @@ +package dns + +import ( + "testing" +) + +func TestCmToM(t *testing.T) { + s := cmToM(0, 0) + if s != "0.00" { + t.Error("0, 0") + } + + s = cmToM(1, 0) + if s != "0.01" { + t.Error("1, 0") + } + + s = cmToM(3, 1) + if s != "0.30" { + t.Error("3, 1") + } + + s = cmToM(4, 2) + if s != "4" { + t.Error("4, 2") + } + + s = cmToM(5, 3) + if s != "50" { + t.Error("5, 3") + } + + s = cmToM(7, 5) + if s != "7000" { + t.Error("7, 5") + } + + s = cmToM(9, 9) + if s != "90000000" { + t.Error("9, 9") + } +} diff --git a/vendor/github.com/miekg/dns/update_test.go b/vendor/github.com/miekg/dns/update_test.go new file mode 100644 index 0000000000..56602dfe9f --- /dev/null +++ b/vendor/github.com/miekg/dns/update_test.go @@ -0,0 +1,145 @@ +package dns + +import ( + "bytes" + "testing" +) + +func TestDynamicUpdateParsing(t *testing.T) { + prefix := "example.com. 
IN " + for _, typ := range TypeToString { + if typ == "OPT" || typ == "AXFR" || typ == "IXFR" || typ == "ANY" || typ == "TKEY" || + typ == "TSIG" || typ == "ISDN" || typ == "UNSPEC" || typ == "NULL" || typ == "ATMA" || + typ == "Reserved" || typ == "None" || typ == "NXT" || typ == "MAILB" || typ == "MAILA" { + continue + } + r, err := NewRR(prefix + typ) + if err != nil { + t.Errorf("failure to parse: %s %s: %v", prefix, typ, err) + } else { + t.Logf("parsed: %s", r.String()) + } + } +} + +func TestDynamicUpdateUnpack(t *testing.T) { + // From https://github.com/miekg/dns/issues/150#issuecomment-62296803 + // It should be an update message for the zone "example.", + // deleting the A RRset "example." and then adding an A record at "example.". + // class ANY, TYPE A + buf := []byte{171, 68, 40, 0, 0, 1, 0, 0, 0, 2, 0, 0, 7, 101, 120, 97, 109, 112, 108, 101, 0, 0, 6, 0, 1, 192, 12, 0, 1, 0, 255, 0, 0, 0, 0, 0, 0, 192, 12, 0, 1, 0, 1, 0, 0, 0, 0, 0, 4, 127, 0, 0, 1} + msg := new(Msg) + err := msg.Unpack(buf) + if err != nil { + t.Errorf("failed to unpack: %v\n%s", err, msg.String()) + } +} + +func TestDynamicUpdateZeroRdataUnpack(t *testing.T) { + m := new(Msg) + rr := &RR_Header{Name: ".", Rrtype: 0, Class: 1, Ttl: ^uint32(0), Rdlength: 0} + m.Answer = []RR{rr, rr, rr, rr, rr} + m.Ns = m.Answer + for n, s := range TypeToString { + rr.Rrtype = n + bytes, err := m.Pack() + if err != nil { + t.Errorf("failed to pack %s: %v", s, err) + continue + } + if err := new(Msg).Unpack(bytes); err != nil { + t.Errorf("failed to unpack %s: %v", s, err) + } + } +} + +func TestRemoveRRset(t *testing.T) { + // Should add a zero data RR in Class ANY with a TTL of 0 + // for each set mentioned in the RRs provided to it. + rr, err := NewRR(". 100 IN A 127.0.0.1") + if err != nil { + t.Fatalf("error constructing RR: %v", err) + } + m := new(Msg) + m.Ns = []RR{&RR_Header{Name: ".", Rrtype: TypeA, Class: ClassANY, Ttl: 0, Rdlength: 0}} + expectstr := m.String() + expect, err := m.Pack() + if err != nil { + t.Fatalf("error packing expected msg: %v", err) + } + + m.Ns = nil + m.RemoveRRset([]RR{rr}) + actual, err := m.Pack() + if err != nil { + t.Fatalf("error packing actual msg: %v", err) + } + if !bytes.Equal(actual, expect) { + tmp := new(Msg) + if err := tmp.Unpack(actual); err != nil { + t.Fatalf("error unpacking actual msg: %v\nexpected: %v\ngot: %v\n", err, expect, actual) + } + t.Errorf("expected msg:\n%s", expectstr) + t.Errorf("actual msg:\n%v", tmp) + } +} + +func TestPreReqAndRemovals(t *testing.T) { + // Build a list of multiple prereqs and then somes removes followed by an insert. + // We should be able to add multiple prereqs and updates. + m := new(Msg) + m.SetUpdate("example.org.") + m.Id = 1234 + + // Use a full set of RRs each time, so we are sure the rdata is stripped. + rr_name1, _ := NewRR("name_used. 3600 IN A 127.0.0.1") + rr_name2, _ := NewRR("name_not_used. 3600 IN A 127.0.0.1") + rr_remove1, _ := NewRR("remove1. 3600 IN A 127.0.0.1") + rr_remove2, _ := NewRR("remove2. 3600 IN A 127.0.0.1") + rr_remove3, _ := NewRR("remove3. 3600 IN A 127.0.0.1") + rr_insert, _ := NewRR("insert. 3600 IN A 127.0.0.1") + rr_rrset1, _ := NewRR("rrset_used1. 3600 IN A 127.0.0.1") + rr_rrset2, _ := NewRR("rrset_used2. 3600 IN A 127.0.0.1") + rr_rrset3, _ := NewRR("rrset_not_used. 3600 IN A 127.0.0.1") + + // Handle the prereqs. + m.NameUsed([]RR{rr_name1}) + m.NameNotUsed([]RR{rr_name2}) + m.RRsetUsed([]RR{rr_rrset1}) + m.Used([]RR{rr_rrset2}) + m.RRsetNotUsed([]RR{rr_rrset3}) + + // and now the updates. 
+ m.RemoveName([]RR{rr_remove1}) + m.RemoveRRset([]RR{rr_remove2}) + m.Remove([]RR{rr_remove3}) + m.Insert([]RR{rr_insert}) + + // This test function isn't a Example function because we print these RR with tabs at the + // end and the Example function trim these, thus they never match. + // TODO(miek): don't print these tabs and make this into an Example function. + expect := `;; opcode: UPDATE, status: NOERROR, id: 1234 +;; flags:; QUERY: 1, ANSWER: 5, AUTHORITY: 4, ADDITIONAL: 0 + +;; QUESTION SECTION: +;example.org. IN SOA + +;; ANSWER SECTION: +name_used. 0 ANY ANY +name_not_used. 0 NONE ANY +rrset_used1. 0 ANY A +rrset_used2. 3600 IN A 127.0.0.1 +rrset_not_used. 0 NONE A + +;; AUTHORITY SECTION: +remove1. 0 ANY ANY +remove2. 0 ANY A +remove3. 0 NONE A 127.0.0.1 +insert. 3600 IN A 127.0.0.1 +` + + if m.String() != expect { + t.Errorf("expected msg:\n%s", expect) + t.Errorf("actual msg:\n%v", m.String()) + } +} diff --git a/vendor/github.com/miekg/dns/xfr_test.go b/vendor/github.com/miekg/dns/xfr_test.go new file mode 100644 index 0000000000..1337eec654 --- /dev/null +++ b/vendor/github.com/miekg/dns/xfr_test.go @@ -0,0 +1,161 @@ +// +build net + +package dns + +import ( + "net" + "testing" + "time" +) + +func getIP(s string) string { + a, err := net.LookupAddr(s) + if err != nil { + return "" + } + return a[0] +} + +// flaky, need to setup local server and test from +// that. +func TestAXFR_Miek(t *testing.T) { + // This test runs against a server maintained by Miek + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("miek.nl.") + + server := getIP("linode.atoom.net") + + tr := new(Transfer) + + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Fatal("failed to setup axfr: ", err) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + for _, rr := range ex.RR { + t.Log(rr.String()) + } + } + } +} + +// fails. 
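+// The nlnetlabs.nl transfer is expected to span multiple AXFR envelopes, per the test name below.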
+func TestAXFR_NLNL_MultipleEnvelopes(t *testing.T) { + // This test runs against a server maintained by NLnet Labs + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("nlnetlabs.nl.") + + server := getIP("open.nlnetlabs.nl.") + + tr := new(Transfer) + if a, err := tr.In(m, net.JoinHostPort(server, "53")); err != nil { + t.Fatalf("failed to setup axfr %v for server: %v", err, server) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + } + } +} + +func TestAXFR_Miek_Tsig(t *testing.T) { + // This test runs against a server maintained by Miek + if testing.Short() { + return + } + m := new(Msg) + m.SetAxfr("example.nl.") + m.SetTsig("axfr.", HmacMD5, 300, time.Now().Unix()) + + tr := new(Transfer) + tr.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} + + if a, err := tr.In(m, "176.58.119.54:53"); err != nil { + t.Fatal("failed to setup axfr: ", err) + } else { + for ex := range a { + if ex.Error != nil { + t.Errorf("error %v", ex.Error) + break + } + for _, rr := range ex.RR { + t.Log(rr.String()) + } + } + } +} + +func TestAXFR_SIDN_NSD3_NONE(t *testing.T) { testAXFRSIDN(t, "nsd", "") } +func TestAXFR_SIDN_NSD3_MD5(t *testing.T) { testAXFRSIDN(t, "nsd", HmacMD5) } +func TestAXFR_SIDN_NSD3_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA1) } +func TestAXFR_SIDN_NSD3_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd", HmacSHA256) } + +func TestAXFR_SIDN_NSD4_NONE(t *testing.T) { testAXFRSIDN(t, "nsd4", "") } +func TestAXFR_SIDN_NSD4_MD5(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacMD5) } +func TestAXFR_SIDN_NSD4_SHA1(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA1) } +func TestAXFR_SIDN_NSD4_SHA256(t *testing.T) { testAXFRSIDN(t, "nsd4", HmacSHA256) } + +func TestAXFR_SIDN_BIND9_NONE(t *testing.T) { testAXFRSIDN(t, "bind9", "") } +func TestAXFR_SIDN_BIND9_MD5(t *testing.T) { testAXFRSIDN(t, "bind9", HmacMD5) } +func TestAXFR_SIDN_BIND9_SHA1(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA1) } +func TestAXFR_SIDN_BIND9_SHA256(t *testing.T) { testAXFRSIDN(t, "bind9", HmacSHA256) } + +func TestAXFR_SIDN_KNOT_NONE(t *testing.T) { testAXFRSIDN(t, "knot", "") } +func TestAXFR_SIDN_KNOT_MD5(t *testing.T) { testAXFRSIDN(t, "knot", HmacMD5) } +func TestAXFR_SIDN_KNOT_SHA1(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA1) } +func TestAXFR_SIDN_KNOT_SHA256(t *testing.T) { testAXFRSIDN(t, "knot", HmacSHA256) } + +func TestAXFR_SIDN_POWERDNS_NONE(t *testing.T) { testAXFRSIDN(t, "powerdns", "") } +func TestAXFR_SIDN_POWERDNS_MD5(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacMD5) } +func TestAXFR_SIDN_POWERDNS_SHA1(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA1) } +func TestAXFR_SIDN_POWERDNS_SHA256(t *testing.T) { testAXFRSIDN(t, "powerdns", HmacSHA256) } + +func TestAXFR_SIDN_YADIFA_NONE(t *testing.T) { testAXFRSIDN(t, "yadifa", "") } +func TestAXFR_SIDN_YADIFA_MD5(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacMD5) } +func TestAXFR_SIDN_YADIFA_SHA1(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA1) } +func TestAXFR_SIDN_YADIFA_SHA256(t *testing.T) { testAXFRSIDN(t, "yadifa", HmacSHA256) } + +func testAXFRSIDN(t *testing.T, host, alg string) { + // This tests run against a server maintained by SIDN labs, see: + // https://workbench.sidnlabs.nl/ + if testing.Short() { + return + } + x := new(Transfer) + x.TsigSecret = map[string]string{ + "wb_md5.": "Wu/utSasZUkoeCNku152Zw==", + "wb_sha1_longkey.": "uhMpEhPq/RAD9Bt4mqhfmi+7ZdKmjLQb/lcrqYPXR4s/nnbsqw==", + "wb_sha256.": "npfrIJjt/MJOjGJoBNZtsjftKMhkSpIYMv2RzRZt1f8=", + } + 
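+ // Pick the SIDN workbench key name matching the requested HMAC algorithm; an empty alg leaves the transfer unsigned.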
keyname := map[string]string{ + HmacMD5: "wb_md5.", + HmacSHA1: "wb_sha1_longkey.", + HmacSHA256: "wb_sha256.", + }[alg] + + m := new(Msg) + m.SetAxfr("types.wb.sidnlabs.nl.") + if keyname != "" { + m.SetTsig(keyname, alg, 300, time.Now().Unix()) + } + c, err := x.In(m, host+".sidnlabs.nl:53") + if err != nil { + t.Fatal(err) + } + for e := range c { + if e.Error != nil { + t.Fatal(e.Error) + } + } +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000000..d70706d5b3 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000000..47e1f9ef8e --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,137 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. 
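+//
+// The result is cached after the first successful lookup; set DisableCache
+// to force re-detection. A minimal usage sketch (assuming the package is
+// imported as homedir):
+//
+//	home, _ := homedir.Dir()         // e.g. "/home/alice"
+//	path, _ := homedir.Expand("~/x") // filepath.Join(home, "x")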
+func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +func dirUnix() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // If that fails, try getent + var stdout bytes.Buffer + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd = exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + home = os.Getenv("USERPROFILE") + } + if home == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/mitchellh/go-homedir/homedir_test.go b/vendor/github.com/mitchellh/go-homedir/homedir_test.go new file mode 100644 index 0000000000..e4054e72a3 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir_test.go @@ -0,0 +1,112 @@ +package homedir + +import ( + "os" + "os/user" + "path/filepath" + "testing" +) + +func patchEnv(key, value string) func() { + bck := os.Getenv(key) + deferFunc := func() { + os.Setenv(key, bck) + } + + os.Setenv(key, value) + return deferFunc +} + +func BenchmarkDir(b *testing.B) { + // We do this for any "warmups" + for i := 0; i < 10; i++ { + Dir() + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + Dir() + } +} + +func TestDir(t *testing.T) { + u, err := user.Current() + if err != nil { + t.Fatalf("err: %s", err) + } + + dir, err := Dir() + if err != nil { + t.Fatalf("err: %s", err) + } + + if u.HomeDir != dir { + t.Fatalf("%#v != %#v", u.HomeDir, dir) + } +} + +func TestExpand(t *testing.T) { + u, err := user.Current() + if err != nil { + 
t.Fatalf("err: %s", err) + } + + cases := []struct { + Input string + Output string + Err bool + }{ + { + "/foo", + "/foo", + false, + }, + + { + "~/foo", + filepath.Join(u.HomeDir, "foo"), + false, + }, + + { + "", + "", + false, + }, + + { + "~", + u.HomeDir, + false, + }, + + { + "~foo/foo", + "", + true, + }, + } + + for _, tc := range cases { + actual, err := Expand(tc.Input) + if (err != nil) != tc.Err { + t.Fatalf("Input: %#v\n\nErr: %s", tc.Input, err) + } + + if actual != tc.Output { + t.Fatalf("Input: %#v\n\nOutput: %#v", tc.Input, actual) + } + } + + DisableCache = true + defer func() { DisableCache = false }() + defer patchEnv("HOME", "/custom/path/")() + expected := filepath.Join("/", "custom", "path", "foo/bar") + actual, err := Expand("~/foo/bar") + + if err != nil { + t.Errorf("No error is expected, got: %v", err) + } else if actual != expected { + t.Errorf("Expected: %v; actual: %v", expected, actual) + } +} diff --git a/vendor/github.com/mreiferson/go-snappystream/.travis.yml b/vendor/github.com/mreiferson/go-snappystream/.travis.yml new file mode 100644 index 0000000000..a09420b313 --- /dev/null +++ b/vendor/github.com/mreiferson/go-snappystream/.travis.yml @@ -0,0 +1,13 @@ +language: go +go: + - 1.2.2 + - 1.3.1 +env: + - GOARCH=amd64 + - GOARCH=386 +install: + - go get code.google.com/p/snappy-go/snappy +script: + - go test -v +notifications: + email: false diff --git a/vendor/github.com/mreiferson/go-snappystream/fixturedata_test.go b/vendor/github.com/mreiferson/go-snappystream/fixturedata_test.go new file mode 100644 index 0000000000..fed03f5806 --- /dev/null +++ b/vendor/github.com/mreiferson/go-snappystream/fixturedata_test.go @@ -0,0 +1,2701 @@ +package snappystream + +var testDataMan = []byte(` +.TH XARGS 1L \" -*- nroff -*- +.SH NAME +xargs \- build and execute command lines from standard input +.SH SYNOPSIS +.B xargs +[\-0prtx] [\-e[eof-str]] [\-i[replace-str]] [\-l[max-lines]] +[\-n max-args] [\-s max-chars] [\-P max-procs] [\-\-null] [\-\-eof[=eof-str]] +[\-\-replace[=replace-str]] [\-\-max-lines[=max-lines]] [\-\-interactive] +[\-\-max-chars=max-chars] [\-\-verbose] [\-\-exit] [\-\-max-procs=max-procs] +[\-\-max-args=max-args] [\-\-no-run-if-empty] [\-\-version] [\-\-help] +[command [initial-arguments]] +.SH DESCRIPTION +This manual page +documents the GNU version of +.BR xargs . +.B xargs +reads arguments from the standard input, delimited by blanks (which can be +protected with double or single quotes or a backslash) or newlines, +and executes the +.I command +(default is /bin/echo) one or more times with any +.I initial-arguments +followed by arguments read from standard input. Blank lines on the +standard input are ignored. +.P +.B xargs +exits with the following status: +.nf +0 if it succeeds +123 if any invocation of the command exited with status 1-125 +124 if the command exited with status 255 +125 if the command is killed by a signal +126 if the command cannot be run +127 if the command is not found +1 if some other error occurred. +.fi +.SS OPTIONS +.TP +.I "\-\-null, \-0" +Input filenames are terminated by a null character instead of by +whitespace, and the quotes and backslash are not special (every +character is taken literally). Disables the end of file string, which +is treated like any other argument. Useful when arguments might +contain white space, quote marks, or backslashes. The GNU find +\-print0 option produces input suitable for this mode. +.TP +.I "\-\-eof[=eof-str], \-e[eof-str]" +Set the end of file string to \fIeof-str\fR. 
If the end of file +string occurs as a line of input, the rest of the input is ignored. +If \fIeof-str\fR is omitted, there is no end of file string. If this +option is not given, the end of file string defaults to "_". +.TP +.I "\-\-help" +Print a summary of the options to +.B xargs +and exit. +.TP +.I "\-\-replace[=replace-str], \-i[replace-str]" +Replace occurences of \fIreplace-str\fR in the initial arguments with +names read from standard input. +Also, unquoted blanks do not terminate arguments. +If \fIreplace-str\fR is omitted, it +defaults to "{}" (like for 'find \-exec'). Implies \fI\-x\fP and +\fI\-l 1\fP. +.TP +.I "\-\-max-lines[=max-lines], -l[max-lines]" +Use at most \fImax-lines\fR nonblank input lines per command line; +\fImax-lines\fR defaults to 1 if omitted. Trailing blanks cause an +input line to be logically continued on the next input line. Implies +\fI\-x\fR. +.TP +.I "\-\-max-args=max-args, \-n max-args" +Use at most \fImax-args\fR arguments per command line. Fewer than +\fImax-args\fR arguments will be used if the size (see the \-s option) +is exceeded, unless the \-x option is given, in which case \fBxargs\fR +will exit. +.TP +.I "\-\-interactive, \-p" +Prompt the user about whether to run each command line and read a line +from the terminal. Only run the command line if the response starts +with 'y' or 'Y'. Implies \fI\-t\fR. +.TP +.I "\-\-no-run-if-empty, \-r" +If the standard input does not contain any nonblanks, do not run the +command. Normally, the command is run once even if there is no input. +.TP +.I "\-\-max-chars=max-chars, \-s max-chars" +Use at most \fImax-chars\fR characters per command line, including the +command and initial arguments and the terminating nulls at the ends of +the argument strings. The default is as large as possible, up to 20k +characters. +.TP +.I "\-\-verbose, \-t" +Print the command line on the standard error output before executing +it. +.TP +.I "\-\-version" +Print the version number of +.B xargs +and exit. +.TP +.I "\-\-exit, \-x" +Exit if the size (see the \fI\-s\fR option) is exceeded. +.TP +.I "\-\-max-procs=max-procs, \-P max-procs" +Run up to \fImax-procs\fR processes at a time; the default is 1. If +\fImax-procs\fR is 0, \fBxargs\fR will run as many processes as +possible at a time. Use the \fI\-n\fR option with \fI\-P\fR; +otherwise chances are that only one exec will be done. 
+.SH "SEE ALSO" +\fBfind\fP(1L), \fBlocate\fP(1L), \fBlocatedb\fP(5L), \fBupdatedb\fP(1) +\fBFinding Files\fP (on-line in Info, or printed)`) + +// curl -s https://api.github.com/users/mreiferson/repos +var testDataJSON = []byte(` +[ + { + "id": 19041094, + "name": "2014-talks", + "full_name": "mreiferson/2014-talks", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/2014-talks", + "description": "This is the official repository for slides and talks from GopherCon 2014", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/2014-talks", + "forks_url": "https://api.github.com/repos/mreiferson/2014-talks/forks", + "keys_url": "https://api.github.com/repos/mreiferson/2014-talks/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/2014-talks/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/2014-talks/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/2014-talks/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/2014-talks/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/2014-talks/events", + "assignees_url": "https://api.github.com/repos/mreiferson/2014-talks/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/2014-talks/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/2014-talks/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/2014-talks/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/2014-talks/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/2014-talks/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/2014-talks/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/2014-talks/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/2014-talks/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/2014-talks/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/2014-talks/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/2014-talks/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/2014-talks/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/2014-talks/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/2014-talks/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/2014-talks/comments{/number}", + 
"issue_comment_url": "https://api.github.com/repos/mreiferson/2014-talks/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/2014-talks/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/2014-talks/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/2014-talks/merges", + "archive_url": "https://api.github.com/repos/mreiferson/2014-talks/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/2014-talks/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/2014-talks/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/2014-talks/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/2014-talks/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/2014-talks/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/2014-talks/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/2014-talks/releases{/id}", + "created_at": "2014-04-22T18:28:59Z", + "updated_at": "2014-04-26T03:10:39Z", + "pushed_at": "2014-04-25T14:46:35Z", + "git_url": "git://github.com/mreiferson/2014-talks.git", + "ssh_url": "git@github.com:mreiferson/2014-talks.git", + "clone_url": "https://github.com/mreiferson/2014-talks.git", + "svn_url": "https://github.com/mreiferson/2014-talks", + "homepage": null, + "size": 3596, + "stargazers_count": 0, + "watchers_count": 0, + "language": null, + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 3329246, + "name": "asyncdynamo", + "full_name": "mreiferson/asyncdynamo", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/asyncdynamo", + "description": "async Amazon DynamoDB library for Tornado", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/asyncdynamo", + "forks_url": "https://api.github.com/repos/mreiferson/asyncdynamo/forks", + "keys_url": "https://api.github.com/repos/mreiferson/asyncdynamo/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/asyncdynamo/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/asyncdynamo/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/asyncdynamo/hooks", + "issue_events_url": 
"https://api.github.com/repos/mreiferson/asyncdynamo/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/asyncdynamo/events", + "assignees_url": "https://api.github.com/repos/mreiferson/asyncdynamo/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/asyncdynamo/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/asyncdynamo/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/asyncdynamo/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/asyncdynamo/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/asyncdynamo/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/asyncdynamo/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/asyncdynamo/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/asyncdynamo/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/asyncdynamo/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/asyncdynamo/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/asyncdynamo/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/asyncdynamo/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/asyncdynamo/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/asyncdynamo/merges", + "archive_url": "https://api.github.com/repos/mreiferson/asyncdynamo/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/asyncdynamo/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/asyncdynamo/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/asyncdynamo/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/asyncdynamo/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/asyncdynamo/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/asyncdynamo/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/asyncdynamo/releases{/id}", + "created_at": "2012-02-01T21:32:54Z", + "updated_at": "2014-04-03T21:58:44Z", + "pushed_at": "2012-02-01T21:06:23Z", + "git_url": "git://github.com/mreiferson/asyncdynamo.git", + "ssh_url": "git@github.com:mreiferson/asyncdynamo.git", + "clone_url": "https://github.com/mreiferson/asyncdynamo.git", + "svn_url": "https://github.com/mreiferson/asyncdynamo", + "homepage": "", + "size": 73, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Python", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 2622445, + "name": "asyncmongo", + "full_name": "mreiferson/asyncmongo", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + 
"gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/asyncmongo", + "description": "An asynchronous library for accessing mongo with tornado.ioloop", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/asyncmongo", + "forks_url": "https://api.github.com/repos/mreiferson/asyncmongo/forks", + "keys_url": "https://api.github.com/repos/mreiferson/asyncmongo/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/asyncmongo/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/asyncmongo/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/asyncmongo/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/asyncmongo/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/asyncmongo/events", + "assignees_url": "https://api.github.com/repos/mreiferson/asyncmongo/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/asyncmongo/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/asyncmongo/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/asyncmongo/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/asyncmongo/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/asyncmongo/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/asyncmongo/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/asyncmongo/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/asyncmongo/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/asyncmongo/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/asyncmongo/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/asyncmongo/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/asyncmongo/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/asyncmongo/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/asyncmongo/merges", + "archive_url": 
"https://api.github.com/repos/mreiferson/asyncmongo/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/asyncmongo/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/asyncmongo/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/asyncmongo/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/asyncmongo/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/asyncmongo/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/asyncmongo/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/asyncmongo/releases{/id}", + "created_at": "2011-10-21T19:01:05Z", + "updated_at": "2013-01-04T11:58:26Z", + "pushed_at": "2011-10-21T19:02:46Z", + "git_url": "git://github.com/mreiferson/asyncmongo.git", + "ssh_url": "git@github.com:mreiferson/asyncmongo.git", + "clone_url": "https://github.com/mreiferson/asyncmongo.git", + "svn_url": "https://github.com/mreiferson/asyncmongo", + "homepage": "http://github.com/bitly/asyncmongo", + "size": 563, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Python", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 4554560, + "name": "blog.perplexedlabs.com", + "full_name": "mreiferson/blog.perplexedlabs.com", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/blog.perplexedlabs.com", + "description": "archive of posts", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com", + "forks_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/forks", + "keys_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/events", + "assignees_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/assignees{/user}", + "branches_url": 
"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/merges", + "archive_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/releases{/id}", + "created_at": "2012-06-05T01:38:40Z", + "updated_at": "2014-04-27T23:44:56Z", + "pushed_at": "2014-04-27T23:44:56Z", + "git_url": "git://github.com/mreiferson/blog.perplexedlabs.com.git", + "ssh_url": "git@github.com:mreiferson/blog.perplexedlabs.com.git", + "clone_url": "https://github.com/mreiferson/blog.perplexedlabs.com.git", + "svn_url": "https://github.com/mreiferson/blog.perplexedlabs.com", + "homepage": "http://blog.perplexedlabs.com/", + "size": 668, + "stargazers_count": 1, + "watchers_count": 1, + "language": null, + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 2861903, + "name": "btpath", + "full_name": "mreiferson/btpath", + "owner": { + "login": 
"mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/btpath", + "description": "A* implementation/test app (1997)", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/btpath", + "forks_url": "https://api.github.com/repos/mreiferson/btpath/forks", + "keys_url": "https://api.github.com/repos/mreiferson/btpath/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/btpath/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/btpath/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/btpath/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/btpath/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/btpath/events", + "assignees_url": "https://api.github.com/repos/mreiferson/btpath/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/btpath/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/btpath/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/btpath/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/btpath/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/btpath/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/btpath/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/btpath/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/btpath/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/btpath/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/btpath/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/btpath/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/btpath/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/btpath/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/btpath/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/btpath/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/btpath/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/btpath/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/btpath/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/btpath/merges", + "archive_url": "https://api.github.com/repos/mreiferson/btpath/{archive_format}{/ref}", + "downloads_url": 
"https://api.github.com/repos/mreiferson/btpath/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/btpath/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/btpath/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/btpath/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/btpath/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/btpath/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/btpath/releases{/id}", + "created_at": "2011-11-27T17:23:02Z", + "updated_at": "2013-01-04T17:58:42Z", + "pushed_at": "2011-11-29T01:36:49Z", + "git_url": "git://github.com/mreiferson/btpath.git", + "ssh_url": "git@github.com:mreiferson/btpath.git", + "clone_url": "https://github.com/mreiferson/btpath.git", + "svn_url": "https://github.com/mreiferson/btpath", + "homepage": "", + "size": 88, + "stargazers_count": 1, + "watchers_count": 1, + "language": "C++", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 15747148, + "name": "chef-nsq", + "full_name": "mreiferson/chef-nsq", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/chef-nsq", + "description": "Chef Cookbook for NSQ", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/chef-nsq", + "forks_url": "https://api.github.com/repos/mreiferson/chef-nsq/forks", + "keys_url": "https://api.github.com/repos/mreiferson/chef-nsq/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/chef-nsq/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/chef-nsq/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/chef-nsq/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/chef-nsq/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/chef-nsq/events", + "assignees_url": "https://api.github.com/repos/mreiferson/chef-nsq/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/chef-nsq/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/chef-nsq/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/tags{/sha}", + "git_refs_url": 
"https://api.github.com/repos/mreiferson/chef-nsq/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/chef-nsq/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/chef-nsq/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/chef-nsq/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/chef-nsq/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/chef-nsq/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/chef-nsq/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/chef-nsq/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/chef-nsq/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/chef-nsq/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/chef-nsq/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/chef-nsq/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/chef-nsq/merges", + "archive_url": "https://api.github.com/repos/mreiferson/chef-nsq/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/chef-nsq/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/chef-nsq/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/chef-nsq/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/chef-nsq/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/chef-nsq/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/chef-nsq/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/chef-nsq/releases{/id}", + "created_at": "2014-01-08T20:27:41Z", + "updated_at": "2014-04-28T14:15:50Z", + "pushed_at": "2014-04-28T04:31:58Z", + "git_url": "git://github.com/mreiferson/chef-nsq.git", + "ssh_url": "git@github.com:mreiferson/chef-nsq.git", + "clone_url": "https://github.com/mreiferson/chef-nsq.git", + "svn_url": "https://github.com/mreiferson/chef-nsq", + "homepage": null, + "size": 132, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Ruby", + "has_issues": false, + "has_downloads": true, + "has_wiki": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 5287337, + "name": "dablooms", + "full_name": "mreiferson/dablooms", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + 
"events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/dablooms", + "description": "scaling, counting, bloom filter library", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/dablooms", + "forks_url": "https://api.github.com/repos/mreiferson/dablooms/forks", + "keys_url": "https://api.github.com/repos/mreiferson/dablooms/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/dablooms/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/dablooms/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/dablooms/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/dablooms/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/dablooms/events", + "assignees_url": "https://api.github.com/repos/mreiferson/dablooms/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/dablooms/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/dablooms/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/dablooms/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/dablooms/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/dablooms/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/dablooms/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/dablooms/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/dablooms/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/dablooms/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/dablooms/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/dablooms/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/dablooms/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/dablooms/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/dablooms/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/dablooms/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/dablooms/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/dablooms/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/dablooms/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/dablooms/merges", + "archive_url": "https://api.github.com/repos/mreiferson/dablooms/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/dablooms/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/dablooms/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/dablooms/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/dablooms/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/dablooms/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/dablooms/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/dablooms/releases{/id}", + "created_at": "2012-08-03T16:03:50Z", + "updated_at": "2013-03-08T15:37:44Z", + "pushed_at": "2013-03-08T15:37:44Z", + "git_url": 
"git://github.com/mreiferson/dablooms.git", + "ssh_url": "git@github.com:mreiferson/dablooms.git", + "clone_url": "https://github.com/mreiferson/dablooms.git", + "svn_url": "https://github.com/mreiferson/dablooms", + "homepage": "", + "size": 186, + "stargazers_count": 1, + "watchers_count": 1, + "language": "C", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 2861959, + "name": "dod", + "full_name": "mreiferson/dod", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/dod", + "description": "Do or Die - an incomplete real-time strategy game inspired by Warcraft (1997)", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/dod", + "forks_url": "https://api.github.com/repos/mreiferson/dod/forks", + "keys_url": "https://api.github.com/repos/mreiferson/dod/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/dod/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/dod/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/dod/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/dod/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/dod/events", + "assignees_url": "https://api.github.com/repos/mreiferson/dod/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/dod/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/dod/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/dod/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/dod/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/dod/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/dod/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/dod/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/dod/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/dod/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/dod/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/dod/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/dod/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/dod/commits{/sha}", + "git_commits_url": 
"https://api.github.com/repos/mreiferson/dod/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/dod/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/dod/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/dod/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/dod/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/dod/merges", + "archive_url": "https://api.github.com/repos/mreiferson/dod/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/dod/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/dod/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/dod/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/dod/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/dod/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/dod/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/dod/releases{/id}", + "created_at": "2011-11-27T17:33:19Z", + "updated_at": "2014-05-13T00:56:53Z", + "pushed_at": "2011-11-29T02:08:57Z", + "git_url": "git://github.com/mreiferson/dod.git", + "ssh_url": "git@github.com:mreiferson/dod.git", + "clone_url": "https://github.com/mreiferson/dod.git", + "svn_url": "https://github.com/mreiferson/dod", + "homepage": "", + "size": 2044, + "stargazers_count": 1, + "watchers_count": 1, + "language": "C++", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 4515792, + "name": "doozer", + "full_name": "mreiferson/doozer", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/doozer", + "description": "Go client driver for doozerd, a consistent, distributed data store", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/doozer", + "forks_url": "https://api.github.com/repos/mreiferson/doozer/forks", + "keys_url": "https://api.github.com/repos/mreiferson/doozer/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/doozer/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/doozer/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/doozer/hooks", + "issue_events_url": 
"https://api.github.com/repos/mreiferson/doozer/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/doozer/events", + "assignees_url": "https://api.github.com/repos/mreiferson/doozer/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/doozer/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/doozer/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/doozer/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/doozer/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/doozer/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/doozer/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/doozer/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/doozer/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/doozer/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/doozer/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/doozer/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/doozer/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/doozer/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/doozer/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/doozer/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/doozer/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/doozer/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/doozer/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/doozer/merges", + "archive_url": "https://api.github.com/repos/mreiferson/doozer/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/doozer/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/doozer/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/doozer/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/doozer/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/doozer/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/doozer/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/doozer/releases{/id}", + "created_at": "2012-06-01T03:41:14Z", + "updated_at": "2013-03-16T15:23:56Z", + "pushed_at": "2013-03-16T15:23:55Z", + "git_url": "git://github.com/mreiferson/doozer.git", + "ssh_url": "git@github.com:mreiferson/doozer.git", + "clone_url": "https://github.com/mreiferson/doozer.git", + "svn_url": "https://github.com/mreiferson/doozer", + "homepage": "https://github.com/ha/doozerd", + "size": 2584, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 3391437, + "name": "doozer-c", + "full_name": "mreiferson/doozer-c", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", 
+ "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/doozer-c", + "description": "async C client library for doozerd", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/doozer-c", + "forks_url": "https://api.github.com/repos/mreiferson/doozer-c/forks", + "keys_url": "https://api.github.com/repos/mreiferson/doozer-c/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/doozer-c/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/doozer-c/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/doozer-c/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/doozer-c/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/doozer-c/events", + "assignees_url": "https://api.github.com/repos/mreiferson/doozer-c/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/doozer-c/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/doozer-c/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/doozer-c/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/doozer-c/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/doozer-c/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/doozer-c/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/doozer-c/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/doozer-c/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/doozer-c/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/doozer-c/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/doozer-c/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/doozer-c/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/doozer-c/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/doozer-c/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/doozer-c/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/doozer-c/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/doozer-c/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/doozer-c/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/doozer-c/merges", + "archive_url": "https://api.github.com/repos/mreiferson/doozer-c/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/doozer-c/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/doozer-c/issues{/number}", + "pulls_url": 
"https://api.github.com/repos/mreiferson/doozer-c/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/doozer-c/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/doozer-c/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/doozer-c/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/doozer-c/releases{/id}", + "created_at": "2012-02-08T21:15:33Z", + "updated_at": "2014-04-03T21:58:49Z", + "pushed_at": "2012-11-21T16:46:25Z", + "git_url": "git://github.com/mreiferson/doozer-c.git", + "ssh_url": "git@github.com:mreiferson/doozer-c.git", + "clone_url": "https://github.com/mreiferson/doozer-c.git", + "svn_url": "https://github.com/mreiferson/doozer-c", + "homepage": "", + "size": 158, + "stargazers_count": 0, + "watchers_count": 0, + "language": "C", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 4515795, + "name": "doozerd", + "full_name": "mreiferson/doozerd", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/doozerd", + "description": "A consistent distributed data store.", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/doozerd", + "forks_url": "https://api.github.com/repos/mreiferson/doozerd/forks", + "keys_url": "https://api.github.com/repos/mreiferson/doozerd/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/doozerd/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/doozerd/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/doozerd/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/doozerd/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/doozerd/events", + "assignees_url": "https://api.github.com/repos/mreiferson/doozerd/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/doozerd/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/doozerd/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/doozerd/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/doozerd/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/doozerd/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/doozerd/git/trees{/sha}", + 
"statuses_url": "https://api.github.com/repos/mreiferson/doozerd/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/doozerd/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/doozerd/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/doozerd/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/doozerd/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/doozerd/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/doozerd/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/doozerd/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/doozerd/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/doozerd/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/doozerd/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/doozerd/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/doozerd/merges", + "archive_url": "https://api.github.com/repos/mreiferson/doozerd/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/doozerd/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/doozerd/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/doozerd/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/doozerd/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/doozerd/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/doozerd/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/doozerd/releases{/id}", + "created_at": "2012-06-01T03:41:32Z", + "updated_at": "2013-12-28T19:22:30Z", + "pushed_at": "2013-12-28T19:22:30Z", + "git_url": "git://github.com/mreiferson/doozerd.git", + "ssh_url": "git@github.com:mreiferson/doozerd.git", + "clone_url": "https://github.com/mreiferson/doozerd.git", + "svn_url": "https://github.com/mreiferson/doozerd", + "homepage": "", + "size": 3135, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 8172002, + "name": "e", + "full_name": "mreiferson/e", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, 
+ "private": false, + "html_url": "https://github.com/mreiferson/e", + "description": "Library containing high-performance datastructures and utilities for C++", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/e", + "forks_url": "https://api.github.com/repos/mreiferson/e/forks", + "keys_url": "https://api.github.com/repos/mreiferson/e/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/e/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/e/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/e/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/e/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/e/events", + "assignees_url": "https://api.github.com/repos/mreiferson/e/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/e/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/e/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/e/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/e/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/e/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/e/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/e/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/e/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/e/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/e/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/e/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/e/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/e/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/e/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/e/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/e/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/e/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/e/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/e/merges", + "archive_url": "https://api.github.com/repos/mreiferson/e/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/e/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/e/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/e/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/e/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/e/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/e/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/e/releases{/id}", + "created_at": "2013-02-13T02:42:55Z", + "updated_at": "2013-02-18T21:10:07Z", + "pushed_at": "2013-02-13T02:45:16Z", + "git_url": "git://github.com/mreiferson/e.git", + "ssh_url": "git@github.com:mreiferson/e.git", + "clone_url": "https://github.com/mreiferson/e.git", + "svn_url": "https://github.com/mreiferson/e", + "homepage": "", + "size": 437, + "stargazers_count": 0, + "watchers_count": 0, + "language": "C++", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + 
"open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 2792604, + "name": "encfs-macfusion2", + "full_name": "mreiferson/encfs-macfusion2", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/encfs-macfusion2", + "description": "enhanced version of encfs-macfusion2 plugin http://code.google.com/p/encfs-macfusion2/", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/encfs-macfusion2", + "forks_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/forks", + "keys_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/events", + "assignees_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/commits{/sha}", + "comments_url": 
"https://api.github.com/repos/mreiferson/encfs-macfusion2/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/merges", + "archive_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/releases{/id}", + "created_at": "2011-11-17T01:58:01Z", + "updated_at": "2013-10-22T06:29:03Z", + "pushed_at": "2011-11-17T02:13:15Z", + "git_url": "git://github.com/mreiferson/encfs-macfusion2.git", + "ssh_url": "git@github.com:mreiferson/encfs-macfusion2.git", + "clone_url": "https://github.com/mreiferson/encfs-macfusion2.git", + "svn_url": "https://github.com/mreiferson/encfs-macfusion2", + "homepage": "", + "size": 195, + "stargazers_count": 4, + "watchers_count": 4, + "language": "Objective-C", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 1, + "mirror_url": null, + "open_issues_count": 0, + "forks": 1, + "open_issues": 0, + "watchers": 4, + "default_branch": "master" + }, + { + "id": 5263991, + "name": "file2http", + "full_name": "mreiferson/file2http", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/file2http", + "description": "spray a line-oriented file at an HTTP endpoint", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/file2http", + "forks_url": "https://api.github.com/repos/mreiferson/file2http/forks", + "keys_url": "https://api.github.com/repos/mreiferson/file2http/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/file2http/collaborators{/collaborator}", + "teams_url": 
"https://api.github.com/repos/mreiferson/file2http/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/file2http/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/file2http/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/file2http/events", + "assignees_url": "https://api.github.com/repos/mreiferson/file2http/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/file2http/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/file2http/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/file2http/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/file2http/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/file2http/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/file2http/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/file2http/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/file2http/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/file2http/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/file2http/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/file2http/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/file2http/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/file2http/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/file2http/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/file2http/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/file2http/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/file2http/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/file2http/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/file2http/merges", + "archive_url": "https://api.github.com/repos/mreiferson/file2http/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/file2http/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/file2http/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/file2http/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/file2http/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/file2http/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/file2http/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/file2http/releases{/id}", + "created_at": "2012-08-01T19:56:16Z", + "updated_at": "2013-01-11T13:21:40Z", + "pushed_at": "2012-12-21T15:44:32Z", + "git_url": "git://github.com/mreiferson/file2http.git", + "ssh_url": "git@github.com:mreiferson/file2http.git", + "clone_url": "https://github.com/mreiferson/file2http.git", + "svn_url": "https://github.com/mreiferson/file2http", + "homepage": "", + "size": 96, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 15291117, + "name": "gablog", + "full_name": "mreiferson/gablog", + "owner": { + "login": "mreiferson", + "id": 
187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/gablog", + "description": "Gopher Academy Blog -- fork of go.blog", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/gablog", + "forks_url": "https://api.github.com/repos/mreiferson/gablog/forks", + "keys_url": "https://api.github.com/repos/mreiferson/gablog/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/gablog/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/gablog/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/gablog/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/gablog/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/gablog/events", + "assignees_url": "https://api.github.com/repos/mreiferson/gablog/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/gablog/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/gablog/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/gablog/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/gablog/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/gablog/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/gablog/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/gablog/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/gablog/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/gablog/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/gablog/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/gablog/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/gablog/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/gablog/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/gablog/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/gablog/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/gablog/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/gablog/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/gablog/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/gablog/merges", + "archive_url": "https://api.github.com/repos/mreiferson/gablog/{archive_format}{/ref}", + "downloads_url": 
"https://api.github.com/repos/mreiferson/gablog/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/gablog/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/gablog/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/gablog/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/gablog/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/gablog/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/gablog/releases{/id}", + "created_at": "2013-12-18T18:38:37Z", + "updated_at": "2013-12-20T22:39:33Z", + "pushed_at": "2013-12-20T22:21:52Z", + "git_url": "git://github.com/mreiferson/gablog.git", + "ssh_url": "git@github.com:mreiferson/gablog.git", + "clone_url": "https://github.com/mreiferson/gablog.git", + "svn_url": "https://github.com/mreiferson/gablog", + "homepage": "http://blog.gopheracademy.com", + "size": 7911, + "stargazers_count": 0, + "watchers_count": 0, + "language": "CSS", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 12223286, + "name": "git-open-pull", + "full_name": "mreiferson/git-open-pull", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/git-open-pull", + "description": "convert a github issue into a pull request", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/git-open-pull", + "forks_url": "https://api.github.com/repos/mreiferson/git-open-pull/forks", + "keys_url": "https://api.github.com/repos/mreiferson/git-open-pull/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/git-open-pull/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/git-open-pull/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/git-open-pull/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/git-open-pull/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/git-open-pull/events", + "assignees_url": "https://api.github.com/repos/mreiferson/git-open-pull/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/git-open-pull/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/git-open-pull/tags", + "blobs_url": 
"https://api.github.com/repos/mreiferson/git-open-pull/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/git-open-pull/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/git-open-pull/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/git-open-pull/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/git-open-pull/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/git-open-pull/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/git-open-pull/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/git-open-pull/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/git-open-pull/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/git-open-pull/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/git-open-pull/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/git-open-pull/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/git-open-pull/merges", + "archive_url": "https://api.github.com/repos/mreiferson/git-open-pull/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/git-open-pull/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/git-open-pull/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/git-open-pull/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/git-open-pull/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/git-open-pull/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/git-open-pull/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/git-open-pull/releases{/id}", + "created_at": "2013-08-19T17:45:47Z", + "updated_at": "2014-03-03T19:50:11Z", + "pushed_at": "2014-03-03T19:50:09Z", + "git_url": "git://github.com/mreiferson/git-open-pull.git", + "ssh_url": "git@github.com:mreiferson/git-open-pull.git", + "clone_url": "https://github.com/mreiferson/git-open-pull.git", + "svn_url": "https://github.com/mreiferson/git-open-pull", + "homepage": "https://github.com/jehiah/git-open-pull", + "size": 155, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Shell", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 9547968, + "name": "go-hostpool", + "full_name": "mreiferson/go-hostpool", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": 
"https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-hostpool", + "description": "Intelligently and flexibly pool among multiple hosts from your Go application", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/go-hostpool", + "forks_url": "https://api.github.com/repos/mreiferson/go-hostpool/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-hostpool/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-hostpool/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-hostpool/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-hostpool/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-hostpool/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-hostpool/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-hostpool/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-hostpool/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-hostpool/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-hostpool/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-hostpool/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-hostpool/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-hostpool/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-hostpool/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-hostpool/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-hostpool/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-hostpool/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-hostpool/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-hostpool/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-hostpool/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-hostpool/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-hostpool/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-hostpool/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-hostpool/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-hostpool/pulls{/number}", + "milestones_url": 
"https://api.github.com/repos/mreiferson/go-hostpool/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-hostpool/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-hostpool/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-hostpool/releases{/id}", + "created_at": "2013-04-19T15:06:04Z", + "updated_at": "2013-04-30T14:17:45Z", + "pushed_at": "2013-04-30T14:17:44Z", + "git_url": "git://github.com/mreiferson/go-hostpool.git", + "ssh_url": "git@github.com:mreiferson/go-hostpool.git", + "clone_url": "https://github.com/mreiferson/go-hostpool.git", + "svn_url": "https://github.com/mreiferson/go-hostpool", + "homepage": null, + "size": 98, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 3488675, + "name": "go-httpclient", + "full_name": "mreiferson/go-httpclient", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-httpclient", + "description": "a Go HTTP client with timeouts", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-httpclient", + "forks_url": "https://api.github.com/repos/mreiferson/go-httpclient/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-httpclient/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-httpclient/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-httpclient/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-httpclient/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-httpclient/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-httpclient/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-httpclient/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-httpclient/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-httpclient/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/refs{/sha}", + "trees_url": 
"https://api.github.com/repos/mreiferson/go-httpclient/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-httpclient/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-httpclient/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-httpclient/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-httpclient/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-httpclient/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-httpclient/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-httpclient/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-httpclient/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-httpclient/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-httpclient/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-httpclient/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-httpclient/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-httpclient/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-httpclient/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-httpclient/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-httpclient/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-httpclient/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-httpclient/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-httpclient/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-httpclient/releases{/id}", + "created_at": "2012-02-19T21:51:42Z", + "updated_at": "2014-07-19T16:41:18Z", + "pushed_at": "2014-04-25T16:53:03Z", + "git_url": "git://github.com/mreiferson/go-httpclient.git", + "ssh_url": "git@github.com:mreiferson/go-httpclient.git", + "clone_url": "https://github.com/mreiferson/go-httpclient.git", + "svn_url": "https://github.com/mreiferson/go-httpclient", + "homepage": "", + "size": 362, + "stargazers_count": 167, + "watchers_count": 167, + "language": "Go", + "has_issues": true, + "has_downloads": true, + "has_wiki": false, + "forks_count": 21, + "mirror_url": null, + "open_issues_count": 0, + "forks": 21, + "open_issues": 0, + "watchers": 167, + "default_branch": "master" + }, + { + "id": 3924124, + "name": "go-install-as", + "full_name": "mreiferson/go-install-as", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + 
"repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-install-as", + "description": "a Go tool to install a package with a specific import path", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-install-as", + "forks_url": "https://api.github.com/repos/mreiferson/go-install-as/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-install-as/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-install-as/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-install-as/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-install-as/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-install-as/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-install-as/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-install-as/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-install-as/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-install-as/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-install-as/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-install-as/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-install-as/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-install-as/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-install-as/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-install-as/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-install-as/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-install-as/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-install-as/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-install-as/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-install-as/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-install-as/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-install-as/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-install-as/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-install-as/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-install-as/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-install-as/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-install-as/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-install-as/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-install-as/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-install-as/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-install-as/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-install-as/notifications{?since,all,participating}", + "labels_url": 
"https://api.github.com/repos/mreiferson/go-install-as/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-install-as/releases{/id}", + "created_at": "2012-04-04T00:17:37Z", + "updated_at": "2014-06-29T20:11:46Z", + "pushed_at": "2012-09-24T16:08:50Z", + "git_url": "git://github.com/mreiferson/go-install-as.git", + "ssh_url": "git@github.com:mreiferson/go-install-as.git", + "clone_url": "https://github.com/mreiferson/go-install-as.git", + "svn_url": "https://github.com/mreiferson/go-install-as", + "homepage": "", + "size": 107, + "stargazers_count": 53, + "watchers_count": 53, + "language": "Shell", + "has_issues": true, + "has_downloads": true, + "has_wiki": false, + "forks_count": 2, + "mirror_url": null, + "open_issues_count": 0, + "forks": 2, + "open_issues": 0, + "watchers": 53, + "default_branch": "master" + }, + { + "id": 4744067, + "name": "go-notify", + "full_name": "mreiferson/go-notify", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-notify", + "description": "a Go package to observe notable events in a decoupled fashion", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/go-notify", + "forks_url": "https://api.github.com/repos/mreiferson/go-notify/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-notify/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-notify/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-notify/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-notify/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-notify/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-notify/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-notify/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-notify/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-notify/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-notify/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-notify/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-notify/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-notify/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-notify/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-notify/languages", + "stargazers_url": 
"https://api.github.com/repos/mreiferson/go-notify/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-notify/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-notify/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-notify/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-notify/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-notify/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-notify/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-notify/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-notify/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-notify/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-notify/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-notify/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-notify/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-notify/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-notify/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-notify/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-notify/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-notify/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-notify/releases{/id}", + "created_at": "2012-06-21T20:30:43Z", + "updated_at": "2013-01-10T18:07:58Z", + "pushed_at": "2012-06-21T20:30:22Z", + "git_url": "git://github.com/mreiferson/go-notify.git", + "ssh_url": "git@github.com:mreiferson/go-notify.git", + "clone_url": "https://github.com/mreiferson/go-notify.git", + "svn_url": "https://github.com/mreiferson/go-notify", + "homepage": null, + "size": 68, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 12449360, + "name": "go-nsq", + "full_name": "mreiferson/go-nsq", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-nsq", + "description": "the official Go package for NSQ", 
+ "fork": true, + "url": "https://api.github.com/repos/mreiferson/go-nsq", + "forks_url": "https://api.github.com/repos/mreiferson/go-nsq/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-nsq/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-nsq/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-nsq/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-nsq/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-nsq/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-nsq/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-nsq/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-nsq/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-nsq/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-nsq/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-nsq/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-nsq/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-nsq/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-nsq/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-nsq/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-nsq/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-nsq/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-nsq/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-nsq/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-nsq/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-nsq/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-nsq/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-nsq/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-nsq/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-nsq/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-nsq/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-nsq/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-nsq/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-nsq/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-nsq/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-nsq/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-nsq/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-nsq/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-nsq/releases{/id}", + "created_at": "2013-08-29T02:07:54Z", + "updated_at": "2014-06-29T13:56:36Z", + "pushed_at": "2014-07-20T16:44:32Z", + "git_url": "git://github.com/mreiferson/go-nsq.git", + "ssh_url": "git@github.com:mreiferson/go-nsq.git", + "clone_url": "https://github.com/mreiferson/go-nsq.git", + "svn_url": "https://github.com/mreiferson/go-nsq", + "homepage": "", + "size": 1783, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": false, + "forks_count": 1, + "mirror_url": null, + 
"open_issues_count": 0, + "forks": 1, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 16654468, + "name": "go-options", + "full_name": "mreiferson/go-options", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-options", + "description": "a Go package to structure and resolve options", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-options", + "forks_url": "https://api.github.com/repos/mreiferson/go-options/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-options/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-options/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-options/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-options/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-options/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-options/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-options/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-options/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-options/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-options/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-options/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-options/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-options/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-options/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-options/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-options/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-options/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-options/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-options/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-options/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-options/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-options/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-options/issues/comments/{number}", + "contents_url": 
"https://api.github.com/repos/mreiferson/go-options/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-options/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-options/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-options/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-options/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-options/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-options/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-options/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-options/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-options/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-options/releases{/id}", + "created_at": "2014-02-08T22:19:33Z", + "updated_at": "2014-02-16T00:39:59Z", + "pushed_at": "2014-02-16T00:39:58Z", + "git_url": "git://github.com/mreiferson/go-options.git", + "ssh_url": "git@github.com:mreiferson/go-options.git", + "clone_url": "https://github.com/mreiferson/go-options.git", + "svn_url": "https://github.com/mreiferson/go-options", + "homepage": null, + "size": 128, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Go", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 3924909, + "name": "go-simplejson", + "full_name": "mreiferson/go-simplejson", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-simplejson", + "description": "a Go package to interact with arbitrary JSON", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/go-simplejson", + "forks_url": "https://api.github.com/repos/mreiferson/go-simplejson/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-simplejson/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-simplejson/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-simplejson/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-simplejson/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-simplejson/issues/events{/number}", + "events_url": 
"https://api.github.com/repos/mreiferson/go-simplejson/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-simplejson/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-simplejson/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-simplejson/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-simplejson/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-simplejson/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-simplejson/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-simplejson/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-simplejson/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-simplejson/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-simplejson/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-simplejson/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-simplejson/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-simplejson/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-simplejson/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-simplejson/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-simplejson/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-simplejson/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-simplejson/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-simplejson/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-simplejson/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-simplejson/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-simplejson/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-simplejson/releases{/id}", + "created_at": "2012-04-04T02:36:33Z", + "updated_at": "2014-06-25T01:24:01Z", + "pushed_at": "2014-06-30T15:13:50Z", + "git_url": "git://github.com/mreiferson/go-simplejson.git", + "ssh_url": "git@github.com:mreiferson/go-simplejson.git", + "clone_url": "https://github.com/mreiferson/go-simplejson.git", + "svn_url": "https://github.com/mreiferson/go-simplejson", + "homepage": "", + "size": 210, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": false, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 8614089, + "name": "go-simplelog", + "full_name": "mreiferson/go-simplelog", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": 
"dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-simplelog", + "description": "a simple logging package for Go (inspired by Tornado)", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-simplelog", + "forks_url": "https://api.github.com/repos/mreiferson/go-simplelog/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-simplelog/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-simplelog/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-simplelog/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-simplelog/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-simplelog/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-simplelog/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-simplelog/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-simplelog/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-simplelog/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-simplelog/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-simplelog/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-simplelog/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-simplelog/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-simplelog/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-simplelog/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-simplelog/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-simplelog/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-simplelog/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-simplelog/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-simplelog/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-simplelog/merges", + "archive_url": 
"https://api.github.com/repos/mreiferson/go-simplelog/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-simplelog/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-simplelog/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-simplelog/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-simplelog/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-simplelog/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-simplelog/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-simplelog/releases{/id}", + "created_at": "2013-03-06T21:53:48Z", + "updated_at": "2013-10-11T22:49:05Z", + "pushed_at": "2013-03-31T23:20:11Z", + "git_url": "git://github.com/mreiferson/go-simplelog.git", + "ssh_url": "git@github.com:mreiferson/go-simplelog.git", + "clone_url": "https://github.com/mreiferson/go-simplelog.git", + "svn_url": "https://github.com/mreiferson/go-simplelog", + "homepage": null, + "size": 140, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Go", + "has_issues": true, + "has_downloads": true, + "has_wiki": false, + "forks_count": 1, + "mirror_url": null, + "open_issues_count": 0, + "forks": 1, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 12498288, + "name": "go-snappystream", + "full_name": "mreiferson/go-snappystream", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-snappystream", + "description": "a Go package for framed snappy streams", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-snappystream", + "forks_url": "https://api.github.com/repos/mreiferson/go-snappystream/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-snappystream/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-snappystream/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-snappystream/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-snappystream/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-snappystream/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-snappystream/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-snappystream/assignees{/user}", + "branches_url": 
"https://api.github.com/repos/mreiferson/go-snappystream/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-snappystream/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-snappystream/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-snappystream/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-snappystream/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-snappystream/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-snappystream/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-snappystream/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-snappystream/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-snappystream/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-snappystream/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-snappystream/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-snappystream/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-snappystream/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-snappystream/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-snappystream/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-snappystream/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-snappystream/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-snappystream/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-snappystream/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-snappystream/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-snappystream/releases{/id}", + "created_at": "2013-08-31T00:41:11Z", + "updated_at": "2014-07-20T07:52:45Z", + "pushed_at": "2013-09-17T21:00:14Z", + "git_url": "git://github.com/mreiferson/go-snappystream.git", + "ssh_url": "git@github.com:mreiferson/go-snappystream.git", + "clone_url": "https://github.com/mreiferson/go-snappystream.git", + "svn_url": "https://github.com/mreiferson/go-snappystream", + "homepage": null, + "size": 184, + "stargazers_count": 21, + "watchers_count": 21, + "language": "Go", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 1, + "mirror_url": null, + "open_issues_count": 0, + "forks": 1, + "open_issues": 0, + "watchers": 21, + "default_branch": "master" + }, + { + "id": 5183238, + "name": "go-stat", + "full_name": "mreiferson/go-stat", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": 
"https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-stat", + "description": "performant instrumentation/profiling for Go", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-stat", + "forks_url": "https://api.github.com/repos/mreiferson/go-stat/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-stat/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-stat/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-stat/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-stat/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-stat/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-stat/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-stat/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-stat/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-stat/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-stat/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-stat/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-stat/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-stat/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/go-stat/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-stat/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-stat/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-stat/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-stat/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-stat/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-stat/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-stat/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-stat/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-stat/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-stat/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-stat/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-stat/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-stat/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-stat/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-stat/issues{/number}", + "pulls_url": 
"https://api.github.com/repos/mreiferson/go-stat/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-stat/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-stat/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-stat/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-stat/releases{/id}", + "created_at": "2012-07-25T19:03:42Z", + "updated_at": "2014-01-10T04:39:14Z", + "pushed_at": "2012-07-25T19:04:37Z", + "git_url": "git://github.com/mreiferson/go-stat.git", + "ssh_url": "git@github.com:mreiferson/go-stat.git", + "clone_url": "https://github.com/mreiferson/go-stat.git", + "svn_url": "https://github.com/mreiferson/go-stat", + "homepage": null, + "size": 96, + "stargazers_count": 1, + "watchers_count": 1, + "language": "Go", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 8662365, + "name": "go-ujson", + "full_name": "mreiferson/go-ujson", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/go-ujson", + "description": "a pure Go port of ultrajson", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/go-ujson", + "forks_url": "https://api.github.com/repos/mreiferson/go-ujson/forks", + "keys_url": "https://api.github.com/repos/mreiferson/go-ujson/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/go-ujson/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/go-ujson/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/go-ujson/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/go-ujson/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/go-ujson/events", + "assignees_url": "https://api.github.com/repos/mreiferson/go-ujson/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/go-ujson/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/go-ujson/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/go-ujson/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/go-ujson/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/go-ujson/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/go-ujson/git/trees{/sha}", + 
"statuses_url": "https://api.github.com/repos/mreiferson/go-ujson/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/go-ujson/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/go-ujson/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/go-ujson/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/go-ujson/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/go-ujson/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/go-ujson/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/go-ujson/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/go-ujson/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/go-ujson/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/go-ujson/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/go-ujson/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/go-ujson/merges", + "archive_url": "https://api.github.com/repos/mreiferson/go-ujson/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/go-ujson/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/go-ujson/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/go-ujson/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/go-ujson/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/go-ujson/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/go-ujson/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/go-ujson/releases{/id}", + "created_at": "2013-03-08T23:57:54Z", + "updated_at": "2014-06-26T01:50:40Z", + "pushed_at": "2013-11-10T19:49:16Z", + "git_url": "git://github.com/mreiferson/go-ujson.git", + "ssh_url": "git@github.com:mreiferson/go-ujson.git", + "clone_url": "https://github.com/mreiferson/go-ujson.git", + "svn_url": "https://github.com/mreiferson/go-ujson", + "homepage": "", + "size": 140, + "stargazers_count": 31, + "watchers_count": 31, + "language": "Go", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 8, + "mirror_url": null, + "open_issues_count": 0, + "forks": 8, + "open_issues": 0, + "watchers": 31, + "default_branch": "master" + }, + { + "id": 12815437, + "name": "godep", + "full_name": "mreiferson/godep", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": 
"User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/godep", + "description": "dependency tool for go", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/godep", + "forks_url": "https://api.github.com/repos/mreiferson/godep/forks", + "keys_url": "https://api.github.com/repos/mreiferson/godep/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/godep/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/godep/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/godep/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/godep/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/godep/events", + "assignees_url": "https://api.github.com/repos/mreiferson/godep/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/godep/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/godep/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/godep/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/godep/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/godep/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/godep/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/godep/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/godep/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/godep/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/godep/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/godep/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/godep/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/godep/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/godep/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/godep/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/godep/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/godep/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/godep/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/godep/merges", + "archive_url": "https://api.github.com/repos/mreiferson/godep/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/godep/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/godep/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/godep/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/godep/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/godep/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/godep/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/godep/releases{/id}", + "created_at": "2013-09-13T17:36:10Z", + "updated_at": "2014-03-21T02:53:20Z", + "pushed_at": "2014-01-05T18:07:02Z", + "git_url": "git://github.com/mreiferson/godep.git", + "ssh_url": "git@github.com:mreiferson/godep.git", + "clone_url": "https://github.com/mreiferson/godep.git", + "svn_url": "https://github.com/mreiferson/godep", + "homepage": "http://godoc.org/github.com/kr/godep", + "size": 196, + "stargazers_count": 0, + 
"watchers_count": 0, + "language": "Go", + "has_issues": false, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + }, + { + "id": 2862096, + "name": "hajiworld", + "full_name": "mreiferson/hajiworld", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/hajiworld", + "description": "super mario clone (1999)", + "fork": false, + "url": "https://api.github.com/repos/mreiferson/hajiworld", + "forks_url": "https://api.github.com/repos/mreiferson/hajiworld/forks", + "keys_url": "https://api.github.com/repos/mreiferson/hajiworld/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/hajiworld/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/hajiworld/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/hajiworld/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/hajiworld/issues/events{/number}", + "events_url": "https://api.github.com/repos/mreiferson/hajiworld/events", + "assignees_url": "https://api.github.com/repos/mreiferson/hajiworld/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/hajiworld/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/hajiworld/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/hajiworld/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/hajiworld/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/hajiworld/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/hajiworld/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/hajiworld/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/hajiworld/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/hajiworld/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/hajiworld/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/hajiworld/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/hajiworld/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/hajiworld/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/hajiworld/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/hajiworld/comments{/number}", + "issue_comment_url": 
"https://api.github.com/repos/mreiferson/hajiworld/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/hajiworld/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/hajiworld/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/hajiworld/merges", + "archive_url": "https://api.github.com/repos/mreiferson/hajiworld/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/hajiworld/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/hajiworld/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/hajiworld/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/hajiworld/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/hajiworld/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/hajiworld/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/hajiworld/releases{/id}", + "created_at": "2011-11-27T18:05:02Z", + "updated_at": "2014-01-08T14:10:43Z", + "pushed_at": "2011-11-29T02:49:49Z", + "git_url": "git://github.com/mreiferson/hajiworld.git", + "ssh_url": "git@github.com:mreiferson/hajiworld.git", + "clone_url": "https://github.com/mreiferson/hajiworld.git", + "svn_url": "https://github.com/mreiferson/hajiworld", + "homepage": "", + "size": 27872, + "stargazers_count": 1, + "watchers_count": 1, + "language": "C++", + "has_issues": true, + "has_downloads": true, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 1, + "default_branch": "master" + }, + { + "id": 14853562, + "name": "homebrew", + "full_name": "mreiferson/homebrew", + "owner": { + "login": "mreiferson", + "id": 187441, + "avatar_url": "https://avatars.githubusercontent.com/u/187441?", + "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", + "url": "https://api.github.com/users/mreiferson", + "html_url": "https://github.com/mreiferson", + "followers_url": "https://api.github.com/users/mreiferson/followers", + "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", + "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", + "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", + "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", + "organizations_url": "https://api.github.com/users/mreiferson/orgs", + "repos_url": "https://api.github.com/users/mreiferson/repos", + "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", + "received_events_url": "https://api.github.com/users/mreiferson/received_events", + "type": "User", + "site_admin": false + }, + "private": false, + "html_url": "https://github.com/mreiferson/homebrew", + "description": "The missing package manager for OS X.", + "fork": true, + "url": "https://api.github.com/repos/mreiferson/homebrew", + "forks_url": "https://api.github.com/repos/mreiferson/homebrew/forks", + "keys_url": "https://api.github.com/repos/mreiferson/homebrew/keys{/key_id}", + "collaborators_url": "https://api.github.com/repos/mreiferson/homebrew/collaborators{/collaborator}", + "teams_url": "https://api.github.com/repos/mreiferson/homebrew/teams", + "hooks_url": "https://api.github.com/repos/mreiferson/homebrew/hooks", + "issue_events_url": "https://api.github.com/repos/mreiferson/homebrew/issues/events{/number}", + "events_url": 
"https://api.github.com/repos/mreiferson/homebrew/events", + "assignees_url": "https://api.github.com/repos/mreiferson/homebrew/assignees{/user}", + "branches_url": "https://api.github.com/repos/mreiferson/homebrew/branches{/branch}", + "tags_url": "https://api.github.com/repos/mreiferson/homebrew/tags", + "blobs_url": "https://api.github.com/repos/mreiferson/homebrew/git/blobs{/sha}", + "git_tags_url": "https://api.github.com/repos/mreiferson/homebrew/git/tags{/sha}", + "git_refs_url": "https://api.github.com/repos/mreiferson/homebrew/git/refs{/sha}", + "trees_url": "https://api.github.com/repos/mreiferson/homebrew/git/trees{/sha}", + "statuses_url": "https://api.github.com/repos/mreiferson/homebrew/statuses/{sha}", + "languages_url": "https://api.github.com/repos/mreiferson/homebrew/languages", + "stargazers_url": "https://api.github.com/repos/mreiferson/homebrew/stargazers", + "contributors_url": "https://api.github.com/repos/mreiferson/homebrew/contributors", + "subscribers_url": "https://api.github.com/repos/mreiferson/homebrew/subscribers", + "subscription_url": "https://api.github.com/repos/mreiferson/homebrew/subscription", + "commits_url": "https://api.github.com/repos/mreiferson/homebrew/commits{/sha}", + "git_commits_url": "https://api.github.com/repos/mreiferson/homebrew/git/commits{/sha}", + "comments_url": "https://api.github.com/repos/mreiferson/homebrew/comments{/number}", + "issue_comment_url": "https://api.github.com/repos/mreiferson/homebrew/issues/comments/{number}", + "contents_url": "https://api.github.com/repos/mreiferson/homebrew/contents/{+path}", + "compare_url": "https://api.github.com/repos/mreiferson/homebrew/compare/{base}...{head}", + "merges_url": "https://api.github.com/repos/mreiferson/homebrew/merges", + "archive_url": "https://api.github.com/repos/mreiferson/homebrew/{archive_format}{/ref}", + "downloads_url": "https://api.github.com/repos/mreiferson/homebrew/downloads", + "issues_url": "https://api.github.com/repos/mreiferson/homebrew/issues{/number}", + "pulls_url": "https://api.github.com/repos/mreiferson/homebrew/pulls{/number}", + "milestones_url": "https://api.github.com/repos/mreiferson/homebrew/milestones{/number}", + "notifications_url": "https://api.github.com/repos/mreiferson/homebrew/notifications{?since,all,participating}", + "labels_url": "https://api.github.com/repos/mreiferson/homebrew/labels{/name}", + "releases_url": "https://api.github.com/repos/mreiferson/homebrew/releases{/id}", + "created_at": "2013-12-02T05:20:40Z", + "updated_at": "2014-02-17T17:19:19Z", + "pushed_at": "2014-02-17T17:06:03Z", + "git_url": "git://github.com/mreiferson/homebrew.git", + "ssh_url": "git@github.com:mreiferson/homebrew.git", + "clone_url": "https://github.com/mreiferson/homebrew.git", + "svn_url": "https://github.com/mreiferson/homebrew", + "homepage": "http://brew.sh", + "size": 29725, + "stargazers_count": 0, + "watchers_count": 0, + "language": "Ruby", + "has_issues": false, + "has_downloads": false, + "has_wiki": true, + "forks_count": 0, + "mirror_url": null, + "open_issues_count": 0, + "forks": 0, + "open_issues": 0, + "watchers": 0, + "default_branch": "master" + } +] +`) diff --git a/vendor/github.com/mreiferson/go-snappystream/reader_test.go b/vendor/github.com/mreiferson/go-snappystream/reader_test.go new file mode 100644 index 0000000000..bb74ce461a --- /dev/null +++ b/vendor/github.com/mreiferson/go-snappystream/reader_test.go @@ -0,0 +1,650 @@ +package snappystream + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "io/ioutil" + 
"strings" + "testing" + + "github.com/mreiferson/go-snappystream/snappy-go" +) + +// This test checks that padding and reserved skippable blocks are ignored by +// the reader. +func TestReader_skippable(t *testing.T) { + var buf bytes.Buffer + // write some blocks with injected padding/skippable blocks + w := NewWriter(&buf) + write := func(p []byte) (int, error) { + return w.Write(p) + } + writepad := func(b byte, n int) (int, error) { + return buf.Write(opaqueChunk(b, n)) + } + _, err := write([]byte("hello")) + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = writepad(0xfe, 100) // normal padding + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = write([]byte(" ")) + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = writepad(0xa0, 100) // reserved skippable block + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = writepad(0xfe, MaxBlockSize) // normal padding + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = write([]byte("padding")) + if err != nil { + t.Fatalf("write error: %v", err) + } + + p, err := ioutil.ReadAll(NewReader(&buf, true)) + if err != nil { + t.Fatalf("read error: %v", err) + } + if string(p) != "hello padding" { + t.Fatalf("read: unexpected content %q", string(p)) + } +} + +// This test checks that reserved unskippable blocks are cause decoder errors. +func TestReader_unskippable(t *testing.T) { + var buf bytes.Buffer + // write some blocks with injected padding/skippable blocks + w := NewWriter(&buf) + write := func(p []byte) (int, error) { + return w.Write(p) + } + writepad := func(b byte, n int) (int, error) { + return buf.Write(opaqueChunk(b, n)) + } + _, err := write([]byte("unskippable")) + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = writepad(0x50, 100) // unskippable reserved block + if err != nil { + t.Fatalf("write error: %v", err) + } + _, err = write([]byte(" blocks")) + if err != nil { + t.Fatalf("write error: %v", err) + } + + _, err = ioutil.ReadAll(NewReader(&buf, true)) + if err == nil { + t.Fatalf("read success") + } +} + +func TestReaderStreamID(t *testing.T) { + data := []byte("a snappy-framed data stream") + var buf bytes.Buffer + w := NewWriter(&buf) + _, err := w.Write(data) + if err != nil { + t.Fatal(err) + } + + stream := buf.Bytes() + + // sanity check: the stream can be decoded and starts with streamID + r := NewReader(bytes.NewReader(stream), true) + _, err = ioutil.ReadAll(r) + if err != nil { + t.Fatalf("read: %v", err) + } + if !bytes.HasPrefix(stream, streamID) { + t.Fatal("missing stream id") + } + + // streamNoID is valid except for a missing the streamID block + streamNoID := bytes.TrimPrefix(stream, streamID) + r = NewReader(bytes.NewReader(streamNoID), true) + n, err := r.Read(make([]byte, 1)) + if err == nil { + t.Fatalf("read: expected an error reading input missing a stream identifier block") + } + if n != 0 { + t.Fatalf("read: read non-zero number of bytes %d", n) + } + n, err = r.Read(make([]byte, 1)) + if err == nil { + t.Fatalf("read: successful read after missing stream id error") + } + if n != 0 { + t.Fatalf("read: read non-zero number of bytes %d after missing stream id error", n) + } +} + +// This test validates the reader successfully decods a padding of maximal +// size, 2^24 - 1. 
+func TestReader_maxPad(t *testing.T) { + buf := bytes.NewReader(bytes.Join([][]byte{ + streamID, + compressedChunk(t, []byte("a maximal padding chunk")), + opaqueChunk(0xfe, (1<<24)-1), // normal padding + compressedChunk(t, []byte(" is decoded successfully")), + }, nil)) + r := NewReader(buf, true) + p, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("read error: %v", err) + } + if string(p) != "a maximal padding chunk is decoded successfully" { + t.Fatalf("read: unexpected content %q", string(p)) + } +} + +// This test validates the reader successfully decodes a skippable chunk of +// maximal size, 2^24 - 1. +func TestReader_maxSkippable(t *testing.T) { + buf := bytes.NewReader(bytes.Join([][]byte{ + streamID, + compressedChunk(t, []byte("a maximal skippable chunk")), + opaqueChunk(0xce, (1<<24)-1), // reserved skippable chunk + compressedChunk(t, []byte(" is decoded successfully")), + }, nil)) + r := NewReader(buf, true) + p, err := ioutil.ReadAll(r) + if err != nil { + t.Fatalf("read error: %v", err) + } + if string(p) != "a maximal skippable chunk is decoded successfully" { + t.Fatalf("read: unexpected content %q", string(p)) + } +} + +// TestReader_maxBlock validates bounds checking on encoded and decoded data +// (4.2 Compressed Data). +func TestReader_maxBlock(t *testing.T) { + // decompressing a block with compressed length greater than MaxBlockSize + // should succeed. + buf := bytes.NewReader(bytes.Join([][]byte{ + streamID, + compressedChunkGreaterN(t, MaxBlockSize), + }, nil)) + r := NewReader(buf, true) + b, err := ioutil.ReadAll(r) + if err != nil { + t.Fatal(err) + } + if len(b) != MaxBlockSize { + t.Fatalf("bad read (%d bytes)", len(b)) + } + + // decompressing should fail if the block has decompressed length greater + // than MaxBlockSize. + buf = bytes.NewReader(bytes.Join([][]byte{ + streamID, + compressedChunk(t, make([]byte, MaxBlockSize+1)), + }, nil)) + r = NewReader(buf, true) + b, err = ioutil.ReadAll(r) + if err == nil { + t.Fatal("unexpected success") + } + if len(b) > 0 { + t.Fatalf("unexpected read %q", b) + } +} + +// This test validates the reader's behavior when encountering unskippable chunks of +// maximal size, 2^24 - 1. The desired error in this case is one reporting +// an unskippable chunk, not a length error. +func TestReader_maxUnskippable(t *testing.T) { + // the first block should be decoded successfully. + prefix := "a maximal unskippable chunk" + buf := bytes.NewReader(bytes.Join([][]byte{ + streamID, + compressedChunk(t, []byte(prefix)), + opaqueChunk(0x03, (1<<24)-1), // low end of the unskippable range + compressedChunk(t, []byte(" failure must be reported as such")), + }, nil)) + p := make([]byte, len(prefix)) + r := NewReader(buf, true) + n, err := r.Read(p) + if err != nil { + t.Fatalf("read error: %v", err) + } + if n != len(p) { + t.Fatalf("read: short read %d", n) + } + if string(p) != prefix { + t.Fatalf("read: bad value %q", p) + } + + n, err = r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if n > 0 { + t.Fatalf("read: read %d more bytes than expected", n) + } + if !strings.Contains(err.Error(), "unskippable") { + t.Fatalf("read error: %v", err) + } +} + +// This test validates errors returned when data blocks exceed size limits. +func TestReader_blockTooLarge(t *testing.T) { + // the compressed chunk size is within the allowed encoding size + // (maxEncodedBlockSize), but the uncompressed data is larger than allowed.
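+ // ((1<<24)-5 below is the largest data size a single chunk can carry once + // the 4-byte checksum is counted against the 3-byte length field.)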
+ badstream := bytes.Join([][]byte{ + streamID, + compressedChunk(t, make([]byte, (1<<24)-5)), + }, nil) + r := NewReader(bytes.NewBuffer(badstream), true) + p := make([]byte, 1) + n, err := r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if n != 0 { + t.Fatalf("read: read data from the stream") + } + + // the uncompressed chunk size is within the allowed encoding size + // (maxEncodedBlockSize), but the raw data is larger than a block is allowed to be. + badstream = bytes.Join([][]byte{ + streamID, + uncompressedChunk(t, make([]byte, (1<<24)-5)), + }, nil) + r = NewReader(bytes.NewBuffer(badstream), true) + p = make([]byte, 1) + n, err = r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if n != 0 { + t.Fatalf("read: read data from the stream") + } +} + +// This test validates the reader's handling of corrupt chunks. +func TestReader_corruption(t *testing.T) { + // corruptID is a corrupt stream identifier + corruptID := bytes.Replace(streamID, []byte("p"), []byte("P"), -1) // corrupt "sNaPpY" data + badstream := corruptID + + r := NewReader(bytes.NewBuffer(badstream), true) + p := make([]byte, 1) + n, err := r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if err == io.EOF { + t.Fatalf("read: unexpected eof") + } + if n != 0 { + t.Fatalf("read: read data from the stream") + } + + corruptID = append([]byte(nil), streamID...) // corrupt the length + corruptID[1] = 0x00 + badstream = corruptID + + r = NewReader(bytes.NewBuffer(badstream), true) + p = make([]byte, 1) + n, err = r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if err == io.EOF { + t.Fatalf("read: unexpected eof") + } + if n != 0 { + t.Fatalf("read: read data from the stream") + } + + // chunk is a valid compressed block + chunk := compressedChunk(t, []byte("a data block")) + + // corrupt is a corrupt chunk + corrupt := append([]byte(nil), chunk...) + copy(corrupt[8:], make([]byte, 10)) // corrupt snappy-encoded data + badstream = bytes.Join([][]byte{ + streamID, + corrupt, + }, nil) + + r = NewReader(bytes.NewBuffer(badstream), true) + p = make([]byte, 1) + n, err = r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if err == io.EOF { + t.Fatalf("read: unexpected eof") + } + if n != 0 { + t.Fatalf("read: read data from the stream") + } + + corrupt = append([]byte(nil), chunk...) + copy(corrupt[4:8], make([]byte, 4)) // crc checksum failure + badstream = bytes.Join([][]byte{ + streamID, + corrupt, + }, nil) + + r = NewReader(bytes.NewBuffer(badstream), true) + p = make([]byte, 1) + n, err = r.Read(p) + if err == nil { + t.Fatalf("read: expected error") + } + if err == io.EOF { + t.Fatalf("read: unexpected eof") + } + if n != 0 { + t.Fatalf("read: read data from the stream") + } +} + +// This test ensures that the reader returns io.ErrUnexpectedEOF at the appropriate +// times. io.EOF must be reserved for the case when all data has been +// successfully decoded.
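+// (this lets a caller distinguish a cleanly terminated stream, io.EOF, from +// one truncated mid-chunk, io.ErrUnexpectedEOF, with no out-of-band length +// information.)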
+func TestReader_unexpectedEOF(t *testing.T) { + var decodeBuffer [64 << 10]byte + + for _, test := range [][]byte{ + // truncated streamIDs + streamID[:4], + streamID[:len(streamID)-1], + + // truncated data blocks + bytes.Join([][]byte{ + streamID, + compressedChunk(t, bytes.Repeat([]byte("abc"), 100))[:2], + }, nil), + bytes.Join([][]byte{ + streamID, + compressedChunk(t, bytes.Repeat([]byte("abc"), 100))[:7], + }, nil), + + // truncated padding + bytes.Join([][]byte{ + streamID, + opaqueChunk(0xfe, 100)[:1], + }, nil), + bytes.Join([][]byte{ + streamID, + opaqueChunk(0xfe, 100)[:8], + }, nil), + + // truncated skippable chunk + bytes.Join([][]byte{ + streamID, + opaqueChunk(0xcf, 100)[:3], + }, nil), + bytes.Join([][]byte{ + streamID, + opaqueChunk(0xcf, 100)[:7], + }, nil), + + // truncated unskippable chunk + bytes.Join([][]byte{ + streamID, + opaqueChunk(0x03, 100)[:3], + }, nil), + bytes.Join([][]byte{ + streamID, + opaqueChunk(0x03, 100)[:5], + }, nil), + } { + r := NewReader(bytes.NewReader(test), true) + n, err := r.Read(decodeBuffer[:]) + if err == nil { + t.Errorf("read bad streamID: expected error") + } + if err != io.ErrUnexpectedEOF { + t.Errorf("read bad streamID: %v", err) + } + if n != 0 { + t.Errorf("read bad streamID: expected read length %d", n) + } + } +} + +var errNotEnoughEntropy = fmt.Errorf("inadequate entropy in PRNG") + +// compressedChunkGreaterN, like compressedChunk, produces a single, compressed, +// snappy-framed block. The returned block will have decoded length at most n +// and encoded length greater than n. +func compressedChunkGreaterN(t *testing.T, n int) []byte { + decoded := make([]byte, n) + var numTries int + var encoded []byte + for len(encoded) <= n && numTries < 3 { + numTries++ + nrd, err := io.ReadFull(rand.Reader, decoded) + if err != nil { + t.Errorf("crypto/rand: %v", err) + return nil + } + if nrd != n { + t.Errorf("crypto/rand: bad read (%d bytes)", nrd) + return nil + } + encoded, err = snappy.Encode(encoded[:cap(encoded)], decoded) + if err != nil { + t.Errorf("snappy: %v", err) + return nil + } + } + if len(encoded) <= n { + t.Error(errNotEnoughEntropy) + return nil + } + + return compressedChunk(t, decoded) +} + +// compressedChunk encodes src, returning a single, compressed, snappy-framed +// block. compressedChunk can encode source data larger than allowed in the +// specification. +func compressedChunk(t *testing.T, src []byte) []byte { + encoded, err := snappy.Encode(nil, src) + if err != nil { + t.Errorf("snappy: %v", err) + return nil + } + + if len(encoded) > (1<<24)-5 { // account for the 4-byte checksum + t.Errorf("block data too large %d", len(src)) + return nil + } + + chunk := make([]byte, len(encoded)+8) + writeHeader(chunk[:8], blockCompressed, encoded, src) + copy(chunk[8:], encoded) + return chunk +} + +// uncompressedChunk encodes src, returning a single, uncompressed, snappy-framed +// block. uncompressedChunk can encode chunks larger than allowed by the +// specification. +func uncompressedChunk(t *testing.T, src []byte) []byte { + if len(src) > (1<<24)-5 { // account for the 4-byte checksum + t.Errorf("block data too large %d", len(src)) + return nil + } + + chunk := make([]byte, len(src)+8) + writeHeader(chunk[:8], blockUncompressed, src, src) + copy(chunk[8:], src) + return chunk +} + +// opaqueChunk returns an opaque chunk of type b (e.g. 0xfe, padding) with body length n +// (total length, n+4 bytes). practically useless but good enough for testing.
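+// a minimal sketch of the chunk this helper assembles (the same 4-byte header +// layout every chunk type shares): +// +// header := []byte{b, byte(n), byte(n >> 8), byte(n >> 16)} +// chunk := append(header, body...) // body: 4-byte checksum + n-4 pad bytes +//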
+// the first 4 bytes of data are random to ensure checksums are not being +// verified. +func opaqueChunk(b byte, n int) []byte { + if b == 0 { + b = 0xfe + } + + length := uint32(n) + lengthle := []byte{byte(length), byte(length >> 8), byte(length >> 16)} + checksum := make([]byte, 4) + _, err := rand.Read(checksum) + if err != nil { + panic(err) + } + padbytes := make([]byte, n-4) // let this panic if n < 4 + _, err = rand.Read(padbytes) + if err != nil { + panic(err) + } + + var h []byte + h = append(h, b) + h = append(h, lengthle...) + h = append(h, checksum...) + h = append(h, padbytes...) + return h +} + +func TestReaderWriteTo(t *testing.T) { + var encbuf bytes.Buffer + var decbuf bytes.Buffer + msg := "hello copy interface" + + w := NewWriter(&encbuf) + n, err := io.WriteString(w, msg) + if err != nil { + t.Fatalf("encode: %v", err) + } + if n != len(msg) { + t.Fatalf("encode: short write %d (!= %d)", n, len(msg)) + } + + r := NewReader(&encbuf, true) + n64, err := r.(*reader).WriteTo(&decbuf) + if err != nil { + t.Fatalf("decode: %v", err) + } + if n64 != int64(len(msg)) { + t.Fatalf("decode: decoded %d bytes %q", n64, decbuf.Bytes()) + } + + decmsg := decbuf.String() + if decmsg != msg { + t.Fatalf("decode: %q", decmsg) + } +} + +func TestReaderWriteToPreviousError(t *testing.T) { + // construct an io.Reader that returns an error on the first read and a + // valid snappy-framed stream on subsequent reads. + var stream io.Reader + stream = encodedString("hello") + stream = readErrorFirst(stream, fmt.Errorf("one time error")) + stream = NewReader(stream, true) + + var buf bytes.Buffer + + // attempt the first read from the stream. + n, err := stream.(*reader).WriteTo(&buf) + if err == nil { + t.Fatalf("error expected") + } + if n != 0 { + t.Fatalf("bytes written to buffer: %q", buf.Bytes()) + } + + // attempt a second read from the stream. + n, err = stream.(*reader).WriteTo(&buf) + if err == nil { + t.Fatalf("error expected") + } + if n != 0 { + t.Fatalf("bytes written to buffer: %q", buf.Bytes()) + } +} + +// readerErrorFirst is an io.Reader that returns an error on the first read. +// readerErrorFirst is used to test that a reader does not attempt to read +// after a read error occurs. +type readerErrorFirst struct { + r io.Reader + err error + count int +} + +func readErrorFirst(r io.Reader, err error) io.Reader { + return &readerErrorFirst{ + r: r, + err: err, + } +} + +func (r *readerErrorFirst) Read(b []byte) (int, error) { + r.count++ + if r.count == 1 { + return 0, r.err + } + return r.r.Read(b) +} + +func TestReaderWriteToWriteError(t *testing.T) { + origmsg := "hello" + stream := NewReader(encodedString(origmsg), true) + + // attempt to write the stream to an io.Writer that will not accept input. + n, err := stream.(*reader).WriteTo(unwritable(fmt.Errorf("cannot write to this writer"))) + if err == nil { + t.Fatalf("error expected") + } + if n != 0 { + t.Fatalf("reported %d written to an unwritable writer", n) + } + + // the decoded message can still be read successfully because the encoded + // stream was not corrupt/broken. + var buf bytes.Buffer + n, err = stream.(*reader).WriteTo(&buf) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if n != int64(len(origmsg)) { + t.Errorf("read %d bytes", n) + } + if buf.String() != origmsg { + t.Errorf("read %q", buf) + } +} + +// writerUnwritable is an io.Writer that always returns an error.
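+// it lets TestReaderWriteToWriteError above exercise WriteTo's write-error +// path while leaving the encoded stream intact and still decodable.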
+type writerUnwritable struct { + err error +} + +func (w *writerUnwritable) Write([]byte) (int, error) { + return 0, w.err +} + +func unwritable(err error) io.Writer { + return &writerUnwritable{err} +} + +func encodedString(s string) io.Reader { + var buf bytes.Buffer + w := NewWriter(&buf) + io.WriteString(w, s) + return &buf +} diff --git a/vendor/github.com/mreiferson/go-snappystream/readwrite_test.go b/vendor/github.com/mreiferson/go-snappystream/readwrite_test.go new file mode 100644 index 0000000000..d775d925b7 --- /dev/null +++ b/vendor/github.com/mreiferson/go-snappystream/readwrite_test.go @@ -0,0 +1,425 @@ +package snappystream + +import ( + "bytes" + "crypto/rand" + "io" + "io/ioutil" + "testing" +) + +const TestFileSize = 10 << 20 // 10MB + +// dummyBytesReader returns an io.Reader that avoids buffering optimizations +// in io.Copy. This can be considered a 'worst-case' io.Reader as far as writer +// frame alignment goes. +// +// Note: io.Copy uses a 32KB buffer internally as of Go 1.3, but that isn't +// part of its public API (undocumented). +func dummyBytesReader(p []byte) io.Reader { + return ioutil.NopCloser(bytes.NewReader(p)) +} + +func testWriteThenRead(t *testing.T, name string, bs []byte) { + var buf bytes.Buffer + w := NewWriter(&buf) + n, err := io.Copy(w, dummyBytesReader(bs)) + if err != nil { + t.Errorf("write %v: %v", name, err) + return + } + if n != int64(len(bs)) { + t.Errorf("write %v: wrote %d bytes (!= %d)", name, n, len(bs)) + return + } + + enclen := buf.Len() + + r := NewReader(&buf, true) + gotbs, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("read %v: %v", name, err) + return + } + n = int64(len(gotbs)) + if n != int64(len(bs)) { + t.Errorf("read %v: read %d bytes (!= %d)", name, n, len(bs)) + return + } + + if !bytes.Equal(gotbs, bs) { + t.Errorf("%v: unequal decompressed content", name) + return + } + + c := float64(len(bs)) / float64(enclen) + t.Logf("%v compression ratio %.03g (%d byte reduction)", name, c, len(bs)-enclen) +} + +func testBufferedWriteThenRead(t *testing.T, name string, bs []byte) { + var buf bytes.Buffer + w := NewBufferedWriter(&buf) + n, err := io.Copy(w, dummyBytesReader(bs)) + if err != nil { + t.Errorf("write %v: %v", name, err) + return + } + if n != int64(len(bs)) { + t.Errorf("write %v: wrote %d bytes (!= %d)", name, n, len(bs)) + return + } + err = w.Close() + if err != nil { + t.Errorf("close %v: %v", name, err) + return + } + + enclen := buf.Len() + + r := NewReader(&buf, true) + gotbs, err := ioutil.ReadAll(r) + if err != nil { + t.Errorf("read %v: %v", name, err) + return + } + n = int64(len(gotbs)) + if n != int64(len(bs)) { + t.Errorf("read %v: read %d bytes (!= %d)", name, n, len(bs)) + return + } + + if !bytes.Equal(gotbs, bs) { + t.Errorf("%v: unequal decompressed content", name) + return + } + + c := float64(len(bs)) / float64(enclen) + t.Logf("%v compression ratio %.03g (%d byte reduction)", name, c, len(bs)-enclen) +} + +func TestWriterReader(t *testing.T) { + testWriteThenRead(t, "simple", []byte("test")) + testWriteThenRead(t, "manpage", testDataMan) + testWriteThenRead(t, "json", testDataJSON) + + p := make([]byte, TestFileSize) + testWriteThenRead(t, "constant", p) + + _, err := rand.Read(p) + if err != nil { + t.Fatal(err) + } + testWriteThenRead(t, "random", p) + +} + +func TestBufferedWriterReader(t *testing.T) { + testBufferedWriteThenRead(t, "simple", []byte("test")) + testBufferedWriteThenRead(t, "manpage", testDataMan) + testBufferedWriteThenRead(t, "json", testDataJSON) + + p := 
make([]byte, TestFileSize) + testBufferedWriteThenRead(t, "constant", p) + + _, err := rand.Read(p) + if err != nil { + t.Fatal(err) + } + testBufferedWriteThenRead(t, "random", p) + +} + +func TestWriterChunk(t *testing.T) { + var buf bytes.Buffer + + in := make([]byte, 128000) + + w := NewWriter(&buf) + r := NewReader(&buf, VerifyChecksum) + + n, err := w.Write(in) + if err != nil { + t.Fatal(err) + } + if n != len(in) { + t.Fatalf("wrote wrong amount %d != %d", n, len(in)) + } + + out := make([]byte, len(in)) + n, err = io.ReadFull(r, out) + if err != nil { + t.Fatal(err) + } + if n != len(in) { + t.Fatalf("read wrong amount %d != %d", n, len(in)) + } + + if !bytes.Equal(out, in) { + t.Fatalf("bytes not equal %v != %v", out, in) + } +} + +func BenchmarkWriterManpage(b *testing.B) { + benchmarkWriterBytes(b, testDataMan) +} +func BenchmarkBufferedWriterManpage(b *testing.B) { + benchmarkBufferedWriterBytes(b, testDataMan) +} +func BenchmarkBufferedWriterManpageNoCopy(b *testing.B) { + benchmarkBufferedWriterBytesNoCopy(b, testDataMan) +} + +func BenchmarkWriterJSON(b *testing.B) { + benchmarkWriterBytes(b, testDataJSON) +} +func BenchmarkBufferedWriterJSON(b *testing.B) { + benchmarkBufferedWriterBytes(b, testDataJSON) +} +func BenchmarkBufferedWriterJSONNoCopy(b *testing.B) { + benchmarkBufferedWriterBytesNoCopy(b, testDataJSON) +} + +// BenchmarkWriterRandom tests performance when encoding effectively +// uncompressible data. +func BenchmarkWriterRandom(b *testing.B) { + benchmarkWriterBytes(b, randBytes(b, TestFileSize)) +} +func BenchmarkBufferedWriterRandom(b *testing.B) { + benchmarkBufferedWriterBytes(b, randBytes(b, TestFileSize)) +} +func BenchmarkBufferedWriterRandomNoCopy(b *testing.B) { + benchmarkBufferedWriterBytesNoCopy(b, randBytes(b, TestFileSize)) +} + +// BenchmarkWriterConstant tests performance encoding maximally compressible +// data. +func BenchmarkWriterConstant(b *testing.B) { + benchmarkWriterBytes(b, make([]byte, TestFileSize)) +} +func BenchmarkBufferedWriterConstant(b *testing.B) { + benchmarkBufferedWriterBytes(b, make([]byte, TestFileSize)) +} +func BenchmarkBufferedWriterConstantNoCopy(b *testing.B) { + benchmarkBufferedWriterBytesNoCopy(b, make([]byte, TestFileSize)) +} + +func benchmarkWriterBytes(b *testing.B, p []byte) { + enc := func() io.WriteCloser { + // wrap the normal writer so that it has a noop Close method. writer + // does not implement ReaderFrom so this does not impact performance. + return &nopWriteCloser{NewWriter(ioutil.Discard)} + } + benchmarkEncode(b, enc, p) +} +func benchmarkBufferedWriterBytes(b *testing.B, p []byte) { + enc := func() io.WriteCloser { + // the writer's ReaderFrom implementation will be used in the benchmark. + return NewBufferedWriter(ioutil.Discard) + } + benchmarkEncode(b, enc, p) +} +func benchmarkBufferedWriterBytesNoCopy(b *testing.B, p []byte) { + enc := func() io.WriteCloser { + // the writer is wrapped so as to hide its ReaderFrom implementation. + return &writeCloserNoCopy{NewBufferedWriter(ioutil.Discard)} + } + benchmarkEncode(b, enc, p) +} + +// benchmarkEncode benchmarks the speed at which bytes can be copied from +// bs into writers created by enc.
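+// each iteration constructs a fresh writer via enc, so per-stream setup and +// Close/flush costs are deliberately included in the measured time.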
+func benchmarkEncode(b *testing.B, enc func() io.WriteCloser, bs []byte) { + size := int64(len(bs)) + b.SetBytes(size) + b.StartTimer() + for i := 0; i < b.N; i++ { + w := enc() + n, err := io.Copy(w, dummyBytesReader(bs)) + if err != nil { + b.Fatal(err) + } + if n != size { + b.Fatalf("wrote wrong amount %d != %d", n, size) + } + err = w.Close() + if err != nil { + b.Fatalf("close: %v", err) + } + } + b.StopTimer() +} + +func BenchmarkReaderManpage(b *testing.B) { + encodeAndBenchmarkReader(b, testDataMan) +} +func BenchmarkReaderManpage_buffered(b *testing.B) { + encodeAndBenchmarkReader_buffered(b, testDataMan) +} +func BenchmarkReaderManpageNoCopy(b *testing.B) { + encodeAndBenchmarkReaderNoCopy(b, testDataMan) +} + +func BenchmarkReaderJSON(b *testing.B) { + encodeAndBenchmarkReader(b, testDataJSON) +} +func BenchmarkReaderJSON_buffered(b *testing.B) { + encodeAndBenchmarkReader_buffered(b, testDataJSON) +} +func BenchmarkReaderJSONNoCopy(b *testing.B) { + encodeAndBenchmarkReaderNoCopy(b, testDataJSON) +} + +// BenchmarkReaderRandom tests decoding of effectively uncompressible data. +func BenchmarkReaderRandom(b *testing.B) { + encodeAndBenchmarkReader(b, randBytes(b, TestFileSize)) +} +func BenchmarkReaderRandom_buffered(b *testing.B) { + encodeAndBenchmarkReader_buffered(b, randBytes(b, TestFileSize)) +} +func BenchmarkReaderRandomNoCopy(b *testing.B) { + encodeAndBenchmarkReaderNoCopy(b, randBytes(b, TestFileSize)) +} + +// BenchmarkReaderConstant tests decoding of maximally compressible data. +func BenchmarkReaderConstant(b *testing.B) { + encodeAndBenchmarkReader(b, make([]byte, TestFileSize)) +} +func BenchmarkReaderConstant_buffered(b *testing.B) { + encodeAndBenchmarkReader_buffered(b, make([]byte, TestFileSize)) +} +func BenchmarkReaderConstantNoCopy(b *testing.B) { + encodeAndBenchmarkReaderNoCopy(b, make([]byte, TestFileSize)) +} + +// encodeAndBenchmarkReader is a helper that benchmarks the package +// reader's performance given p encoded as a snappy framed stream. +// +// encodeAndBenchmarkReader benchmarks decoding of streams containing +// (multiple) short frames. +func encodeAndBenchmarkReader(b *testing.B, p []byte) { + enc, err := encodeStreamBytes(p, false) + if err != nil { + b.Fatalf("pre-benchmark compression: %v", err) + } + dec := func(r io.Reader) io.Reader { + return NewReader(r, VerifyChecksum) + } + benchmarkDecode(b, dec, int64(len(p)), enc) +} + +// encodeAndBenchmarkReader_buffered is a helper that benchmarks the +// package reader's performance given p encoded as a snappy framed stream. +// +// encodeAndBenchmarkReader_buffered benchmarks decoding of streams that +// contain at most one short frame (at the end). +func encodeAndBenchmarkReader_buffered(b *testing.B, p []byte) { + enc, err := encodeStreamBytes(p, true) + if err != nil { + b.Fatalf("pre-benchmark compression: %v", err) + } + dec := func(r io.Reader) io.Reader { + return NewReader(r, VerifyChecksum) + } + benchmarkDecode(b, dec, int64(len(p)), enc) +} + +// encodeAndBenchmarkReaderNoCopy is a helper that benchmarks the +// package reader's performance given p encoded as a snappy framed stream. +// encodeAndBenchmarkReaderNoCopy avoids use of the reader's io.WriterTo +// interface. +// +// encodeAndBenchmarkReaderNoCopy benchmarks decoding of streams that +// contain at most one short frame (at the end).
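+// (the ioutil.NopCloser wrapping relied on below works because, in the Go +// releases this code targets, the returned wrapper does not implement +// io.WriterTo, forcing io.Copy onto its generic read/write loop.)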
+func encodeAndBenchmarkReaderNoCopy(b *testing.B, p []byte) { + enc, err := encodeStreamBytes(p, true) + if err != nil { + b.Fatalf("pre-benchmark compression: %v", err) + } + dec := func(r io.Reader) io.Reader { + return ioutil.NopCloser(NewReader(r, VerifyChecksum)) + } + benchmarkDecode(b, dec, int64(len(p)), enc) +} + +// benchmarkDecode runs a benchmark that repeatedly decodes the snappy +// framed bytes enc. The length of the decoded result in each iteration must +// equal size. +func benchmarkDecode(b *testing.B, dec func(io.Reader) io.Reader, size int64, enc []byte) { + b.SetBytes(int64(len(enc))) // BUG this is probably wrong + b.ResetTimer() + for i := 0; i < b.N; i++ { + r := dec(bytes.NewReader(enc)) + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + b.Fatal(err) + } + if n != size { + b.Fatalf("read wrong amount %d != %d", n, size) + } + } + b.StopTimer() +} + +// encodeStreamBytes is like encodeStream but operates on a byte slice. +// encodeStreamBytes ensures that long streams are not maximally compressed if +// buffer is false. +func encodeStreamBytes(b []byte, buffer bool) ([]byte, error) { + return encodeStream(dummyBytesReader(b), buffer) +} + +// encodeStream encodes data read from r as a snappy framed stream and returns +// the result as a byte slice. if buffer is true the bytes from r are buffered +// to improve the resulting slice's compression ratio. +func encodeStream(r io.Reader, buffer bool) ([]byte, error) { + var buf bytes.Buffer + if !buffer { + w := NewWriter(&buf) + _, err := io.Copy(w, r) + if err != nil { + return nil, err + } + return buf.Bytes(), nil + } + + w := NewBufferedWriter(&buf) + _, err := io.Copy(w, r) + if err != nil { + return nil, err + } + err = w.Close() + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// randBytes reads size bytes from the computer's cryptographic random source. +// the resulting bytes have approximately maximal entropy and are effectively +// uncompressible with any algorithm. +func randBytes(b *testing.B, size int) []byte { + randp := make([]byte, size) + _, err := io.ReadFull(rand.Reader, randp) + if err != nil { + b.Fatal(err) + } + return randp +} + +// writeCloserNoCopy is an io.WriteCloser that simply wraps another +// io.WriteCloser. This is useful for masking implementations of interfaces +// like ReaderFrom which may be opted into use inside functions like io.Copy. +type writeCloserNoCopy struct { + io.WriteCloser +} + +// nopWriteCloser is an io.WriteCloser that has a noop Close method. This type +// has the effect of masking the underlying writer's Close implementation if it +// has one, or satisfying interface implementations for writers that do not +// need to be closed. +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { + return nil +} diff --git a/vendor/github.com/mreiferson/go-snappystream/snappy-go/snappy_test.go b/vendor/github.com/mreiferson/go-snappystream/snappy-go/snappy_test.go new file mode 100644 index 0000000000..7ba839244e --- /dev/null +++ b/vendor/github.com/mreiferson/go-snappystream/snappy-go/snappy_test.go @@ -0,0 +1,261 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package snappy + +import ( + "bytes" + "flag" + "fmt" + "io" + "io/ioutil" + "math/rand" + "net/http" + "os" + "path/filepath" + "strings" + "testing" +) + +var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks") + +func roundtrip(b, ebuf, dbuf []byte) error { + e, err := Encode(ebuf, b) + if err != nil { + return fmt.Errorf("encoding error: %v", err) + } + d, err := Decode(dbuf, e) + if err != nil { + return fmt.Errorf("decoding error: %v", err) + } + if !bytes.Equal(b, d) { + return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d) + } + return nil +} + +func TestEmpty(t *testing.T) { + if err := roundtrip(nil, nil, nil); err != nil { + t.Fatal(err) + } +} + +func TestSmallCopy(t *testing.T) { + for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} { + for i := 0; i < 32; i++ { + s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb" + if err := roundtrip([]byte(s), ebuf, dbuf); err != nil { + t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err) + } + } + } + } +} + +func TestSmallRand(t *testing.T) { + rand.Seed(27354294) + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(rand.Uint32()) + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func TestSmallRegular(t *testing.T) { + for n := 1; n < 20000; n += 23 { + b := make([]byte, n) + for i, _ := range b { + b[i] = uint8(i%10 + 'a') + } + if err := roundtrip(b, nil, nil); err != nil { + t.Fatal(err) + } + } +} + +func benchDecode(b *testing.B, src []byte) { + encoded, err := Encode(nil, src) + if err != nil { + b.Fatal(err) + } + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Decode(src, encoded) + } +} + +func benchEncode(b *testing.B, src []byte) { + // Bandwidth is in amount of uncompressed data. + b.SetBytes(int64(len(src))) + dst := make([]byte, MaxEncodedLen(len(src))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + Encode(dst, src) + } +} + +func readFile(b *testing.B, filename string) []byte { + src, err := ioutil.ReadFile(filename) + if err != nil { + b.Fatalf("failed reading %s: %s", filename, err) + } + if len(src) == 0 { + b.Fatalf("%s has zero length", filename) + } + return src +} + +// expand returns a slice of length n containing repeated copies of src. +func expand(src []byte, n int) []byte { + dst := make([]byte, n) + for x := dst; len(x) > 0; { + i := copy(x, src) + x = x[i:] + } + return dst +} + +func benchWords(b *testing.B, n int, decode bool) { + // Note: the file is OS-language dependent so the resulting values are not + // directly comparable for non-US-English OS installations. 
+ data := expand(readFile(b, "/usr/share/dict/words"), n) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) } +func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) } +func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) } +func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) } +func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) } +func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) } +func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) } +func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) } + +// testFiles' values are copied directly from +// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc. +// The label field is unused in snappy-go. +var testFiles = []struct { + label string + filename string +}{ + {"html", "html"}, + {"urls", "urls.10K"}, + {"jpg", "house.jpg"}, + {"pdf", "mapreduce-osdi-1.pdf"}, + {"html4", "html_x_4"}, + {"cp", "cp.html"}, + {"c", "fields.c"}, + {"lsp", "grammar.lsp"}, + {"xls", "kennedy.xls"}, + {"txt1", "alice29.txt"}, + {"txt2", "asyoulik.txt"}, + {"txt3", "lcet10.txt"}, + {"txt4", "plrabn12.txt"}, + {"bin", "ptt5"}, + {"sum", "sum"}, + {"man", "xargs.1"}, + {"pb", "geo.protodata"}, + {"gaviota", "kppkn.gtb"}, +} + +// The test data files are present at this canonical URL. +const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/" + +func downloadTestdata(basename string) (errRet error) { + filename := filepath.Join("testdata", basename) + f, err := os.Create(filename) + if err != nil { + return fmt.Errorf("failed to create %s: %s", filename, err) + } + defer f.Close() + defer func() { + if errRet != nil { + os.Remove(filename) + } + }() + resp, err := http.Get(baseURL + basename) + if err != nil { + return fmt.Errorf("failed to download %s: %s", baseURL+basename, err) + } + defer resp.Body.Close() + _, err = io.Copy(f, resp.Body) + if err != nil { + return fmt.Errorf("failed to write %s: %s", filename, err) + } + return nil +} + +func benchFile(b *testing.B, n int, decode bool) { + filename := filepath.Join("testdata", testFiles[n].filename) + if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 { + if !*download { + b.Fatal("test data not found; skipping benchmark without the -download flag") + } + // Download the official snappy C++ implementation reference test data + // files for benchmarking. + if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) { + b.Fatalf("failed to create testdata: %s", err) + } + for _, tf := range testFiles { + if err := downloadTestdata(tf.filename); err != nil { + b.Fatalf("failed to download testdata: %s", err) + } + } + } + data := readFile(b, filename) + if decode { + benchDecode(b, data) + } else { + benchEncode(b, data) + } +} + +// Naming convention is kept similar to what snappy's C++ implementation uses. 
+func Benchmark_UFlat0(b *testing.B) { benchFile(b, 0, true) }
+func Benchmark_UFlat1(b *testing.B) { benchFile(b, 1, true) }
+func Benchmark_UFlat2(b *testing.B) { benchFile(b, 2, true) }
+func Benchmark_UFlat3(b *testing.B) { benchFile(b, 3, true) }
+func Benchmark_UFlat4(b *testing.B) { benchFile(b, 4, true) }
+func Benchmark_UFlat5(b *testing.B) { benchFile(b, 5, true) }
+func Benchmark_UFlat6(b *testing.B) { benchFile(b, 6, true) }
+func Benchmark_UFlat7(b *testing.B) { benchFile(b, 7, true) }
+func Benchmark_UFlat8(b *testing.B) { benchFile(b, 8, true) }
+func Benchmark_UFlat9(b *testing.B) { benchFile(b, 9, true) }
+func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
+func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
+func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
+func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
+func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
+func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
+func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
+func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
+func Benchmark_ZFlat0(b *testing.B) { benchFile(b, 0, false) }
+func Benchmark_ZFlat1(b *testing.B) { benchFile(b, 1, false) }
+func Benchmark_ZFlat2(b *testing.B) { benchFile(b, 2, false) }
+func Benchmark_ZFlat3(b *testing.B) { benchFile(b, 3, false) }
+func Benchmark_ZFlat4(b *testing.B) { benchFile(b, 4, false) }
+func Benchmark_ZFlat5(b *testing.B) { benchFile(b, 5, false) }
+func Benchmark_ZFlat6(b *testing.B) { benchFile(b, 6, false) }
+func Benchmark_ZFlat7(b *testing.B) { benchFile(b, 7, false) }
+func Benchmark_ZFlat8(b *testing.B) { benchFile(b, 8, false) }
+func Benchmark_ZFlat9(b *testing.B) { benchFile(b, 9, false) }
+func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
+func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
+func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
+func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
+func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
+func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
+func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
+func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }
diff --git a/vendor/github.com/mreiferson/go-snappystream/writer_test.go b/vendor/github.com/mreiferson/go-snappystream/writer_test.go
new file mode 100644
index 0000000000..d16fd25b79
--- /dev/null
+++ b/vendor/github.com/mreiferson/go-snappystream/writer_test.go
@@ -0,0 +1,125 @@
+package snappystream
+
+import (
+	"bytes"
+	"io/ioutil"
+	"testing"
+)
+
+// This test ensures that all BufferedWriter methods fail after Close has been
+// called.
+func TestBufferedWriterClose(t *testing.T) {
+	w := NewBufferedWriter(ioutil.Discard)
+	err := w.Close()
+	if err != nil {
+		t.Fatalf("closing empty BufferedWriter: %v", err)
+	}
+	err = w.Close()
+	if err == nil {
+		t.Fatalf("successful close after close")
+	}
+	err = w.Flush()
+	if err == nil {
+		t.Fatalf("successful flush after close")
+	}
+	_, err = w.Write([]byte("abc"))
+	if err == nil {
+		t.Fatalf("successful write after close")
+	}
+}
+
+// This test simply checks that buffering has an effect in a situation where it
+// is known that it should.
+func TestBufferedWriter_compression(t *testing.T) {
+	p := []byte("hello snappystream!")
+	n := 10
+
+	var shortbuf bytes.Buffer
+	w := NewWriter(&shortbuf)
+	for i := 0; i < n; i++ {
+		n, err := w.Write(p)
+		if err != nil {
+			t.Fatalf("writer error: %v", err)
+		}
+		if n != len(p) {
+			t.Fatalf("short write: %d", n)
+		}
+	}
+
+	var buf bytes.Buffer
+	bw := NewBufferedWriter(&buf)
+	for i := 0; i < n; i++ {
+		n, err := bw.Write(p)
+		if err != nil {
+			t.Fatalf("buffered writer error: %v", err)
+		}
+		if n != len(p) {
+			t.Fatalf("short write: %d", n)
+		}
+	}
+	err := bw.Close()
+	if err != nil {
+		t.Fatalf("closing buffer: %v", err)
+	}
+
+	uncompressed := int64(n) * int64(len(p))
+	compressed := shortbuf.Len()
+	bufcompressed := buf.Len()
+
+	if compressed <= bufcompressed {
+		t.Fatalf("no benefit from buffering (%d <= %d)", shortbuf.Len(), buf.Len())
+	}
+
+	c := float64(uncompressed) / float64(compressed)
+	bufc := float64(uncompressed) / float64(bufcompressed)
+	improved := bufc / c
+
+	t.Logf("BufferedWriter compression ratio %g (%.03g factor improvement over %g)", bufc, improved, c)
+}
+
+// This test ensures that flushing after every write is equivalent to using
+// NewWriter directly.
+func TestBufferedWriterFlush(t *testing.T) {
+	p := []byte("hello snappystream!")
+	n := 10
+
+	var shortbuf bytes.Buffer
+	w := NewWriter(&shortbuf)
+	for i := 0; i < n; i++ {
+		n, err := w.Write(p)
+		if err != nil {
+			t.Fatalf("writer error: %v", err)
+		}
+		if n != len(p) {
+			t.Fatalf("short write: %d", n)
+		}
+	}
+
+	var buf bytes.Buffer
+	bw := NewBufferedWriter(&buf)
+	for i := 0; i < n; i++ {
+		n, err := bw.Write(p)
+		if err != nil {
+			t.Fatalf("buffered writer error: %v", err)
+		}
+		if n != len(p) {
+			t.Fatalf("short write: %d", n)
+		}
+		err = bw.Flush()
+		if err != nil {
+			t.Fatalf("flush: %v", err)
+		}
+	}
+	err := bw.Close()
+	if err != nil {
+		t.Fatalf("closing buffer: %v", err)
+	}
+
+	if shortbuf.Len() != buf.Len() {
+		t.Fatalf("unexpected size: %d != %d", shortbuf.Len(), buf.Len())
+	}
+
+	if !bytes.Equal(shortbuf.Bytes(), buf.Bytes()) {
+		t.Fatalf("unexpected bytes")
+	}
+}
diff --git a/vendor/github.com/nsqio/go-nsq/.travis.yml b/vendor/github.com/nsqio/go-nsq/.travis.yml
new file mode 100644
index 0000000000..b4d330eaec
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/.travis.yml
@@ -0,0 +1,26 @@
+language: go
+go:
+  - 1.4.3
+  - 1.5.3
+  - 1.6
+env:
+  - NSQ_DOWNLOAD=nsq-0.3.0.linux-amd64.go1.3.3 GOARCH=amd64
+  - NSQ_DOWNLOAD=nsq-0.3.0.linux-amd64.go1.3.3 GOARCH=386
+  - NSQ_DOWNLOAD=nsq-0.3.1.linux-amd64.go1.4.1 GOARCH=amd64
+  - NSQ_DOWNLOAD=nsq-0.3.1.linux-amd64.go1.4.1 GOARCH=386
+  - NSQ_DOWNLOAD=nsq-0.3.2.linux-amd64.go1.4.1 GOARCH=amd64
+  - NSQ_DOWNLOAD=nsq-0.3.2.linux-amd64.go1.4.1 GOARCH=386
+  - NSQ_DOWNLOAD=nsq-0.3.5.linux-amd64.go1.4.2 GOARCH=amd64
+  - NSQ_DOWNLOAD=nsq-0.3.5.linux-amd64.go1.4.2 GOARCH=386
+install:
+  - go get github.com/mreiferson/go-snappystream
+script:
+  - wget http://bitly-downloads.s3.amazonaws.com/nsq/$NSQ_DOWNLOAD.tar.gz
+  - tar zxvf $NSQ_DOWNLOAD.tar.gz
+  - export PATH=$NSQ_DOWNLOAD/bin:$PATH
+  - pushd $TRAVIS_BUILD_DIR
+  - ./test.sh
+  - popd
+notifications:
+  email: false
+sudo: false
diff --git a/vendor/github.com/nsqio/go-nsq/command_test.go b/vendor/github.com/nsqio/go-nsq/command_test.go
new file mode 100644
index 0000000000..5c784d493f
--- /dev/null
+++ b/vendor/github.com/nsqio/go-nsq/command_test.go
@@ -0,0 +1,18 @@
+package nsq
+
+import (
+	"bytes"
+	"testing"
+)
+
+func BenchmarkCommand(b *testing.B) {
+	b.StopTimer()
+	data := make([]byte, 2048)
+
cmd := Publish("test", data) + var buf bytes.Buffer + b.StartTimer() + + for i := 0; i < b.N; i++ { + cmd.WriteTo(&buf) + } +} diff --git a/vendor/github.com/nsqio/go-nsq/config_flag_test.go b/vendor/github.com/nsqio/go-nsq/config_flag_test.go new file mode 100644 index 0000000000..37a745dcf6 --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/config_flag_test.go @@ -0,0 +1,25 @@ +package nsq_test + +import ( + "flag" + + "github.com/nsqio/go-nsq" +) + +func ExampleConfigFlag() { + cfg := nsq.NewConfig() + flagSet := flag.NewFlagSet("", flag.ExitOnError) + + flagSet.Var(&nsq.ConfigFlag{cfg}, "consumer-opt", "option to pass through to nsq.Consumer (may be given multiple times)") + flagSet.PrintDefaults() + + err := flagSet.Parse([]string{ + "--consumer-opt=heartbeat_interval,1s", + "--consumer-opt=max_attempts,10", + }) + if err != nil { + panic(err.Error()) + } + println("HeartbeatInterval", cfg.HeartbeatInterval) + println("MaxAttempts", cfg.MaxAttempts) +} diff --git a/vendor/github.com/nsqio/go-nsq/config_test.go b/vendor/github.com/nsqio/go-nsq/config_test.go new file mode 100644 index 0000000000..060b07396b --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/config_test.go @@ -0,0 +1,104 @@ +package nsq + +import ( + "math/rand" + "net" + "reflect" + "testing" + "time" +) + +func TestConfigSet(t *testing.T) { + c := NewConfig() + if err := c.Set("not a real config value", struct{}{}); err == nil { + t.Error("No error when setting an invalid value") + } + if err := c.Set("tls_v1", "lol"); err == nil { + t.Error("No error when setting `tls_v1` to an invalid value") + } + if err := c.Set("tls_v1", true); err != nil { + t.Errorf("Error setting `tls_v1` config. %s", err) + } + + if err := c.Set("tls-insecure-skip-verify", true); err != nil { + t.Errorf("Error setting `tls-insecure-skip-verify` config. 
%v", err) + } + if c.TlsConfig.InsecureSkipVerify != true { + t.Errorf("Error setting `tls-insecure-skip-verify` config: %v", c.TlsConfig) + } + if err := c.Set("tls-min-version", "tls1.2"); err != nil { + t.Errorf("Error setting `tls-min-version` config: %s", err) + } + if err := c.Set("tls-min-version", "tls1.3"); err == nil { + t.Error("No error when setting `tls-min-version` to an invalid value") + } + if err := c.Set("local_addr", &net.TCPAddr{}); err != nil { + t.Errorf("Error setting `local_addr` config: %s", err) + } + if err := c.Set("local_addr", "1.2.3.4:27015"); err != nil { + t.Errorf("Error setting `local_addr` config: %s", err) + } + if err := c.Set("dial_timeout", "5s"); err != nil { + t.Errorf("Error setting `dial_timeout` config: %s", err) + } + if c.LocalAddr.String() != "1.2.3.4:27015" { + t.Error("Failed to assign `local_addr` config") + } + if reflect.ValueOf(c.BackoffStrategy).Type().String() != "*nsq.ExponentialStrategy" { + t.Error("Failed to set default `exponential` backoff strategy") + } + if err := c.Set("backoff_strategy", "full_jitter"); err != nil { + t.Errorf("Failed to assign `backoff_strategy` config: %v", err) + } + if reflect.ValueOf(c.BackoffStrategy).Type().String() != "*nsq.FullJitterStrategy" { + t.Error("Failed to set `full_jitter` backoff strategy") + } +} + +func TestConfigValidate(t *testing.T) { + c := NewConfig() + if err := c.Validate(); err != nil { + t.Error("initialized config is invalid") + } + c.DeflateLevel = 100 + if err := c.Validate(); err == nil { + t.Error("no error set for invalid value") + } +} + +func TestExponentialBackoff(t *testing.T) { + expected := []time.Duration{ + 1 * time.Second, + 2 * time.Second, + 8 * time.Second, + 32 * time.Second, + } + backoffTest(t, expected, func(c *Config) BackoffStrategy { + return &ExponentialStrategy{cfg: c} + }) +} + +func TestFullJitterBackoff(t *testing.T) { + expected := []time.Duration{ + 566028617 * time.Nanosecond, + 1365407263 * time.Nanosecond, + 5232470547 * time.Nanosecond, + 21467499218 * time.Nanosecond, + } + backoffTest(t, expected, func(c *Config) BackoffStrategy { + return &FullJitterStrategy{cfg: c, rng: rand.New(rand.NewSource(99))} + }) +} + +func backoffTest(t *testing.T, expected []time.Duration, cb func(c *Config) BackoffStrategy) { + config := NewConfig() + attempts := []int{0, 1, 3, 5} + s := cb(config) + for i := range attempts { + result := s.Calculate(attempts[i]) + if result != expected[i] { + t.Fatalf("wrong backoff duration %v for attempt %d (should be %v)", + result, attempts[i], expected[i]) + } + } +} diff --git a/vendor/github.com/nsqio/go-nsq/consumer_test.go b/vendor/github.com/nsqio/go-nsq/consumer_test.go new file mode 100644 index 0000000000..5c5816d5bb --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/consumer_test.go @@ -0,0 +1,237 @@ +package nsq + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "os" + "strconv" + "strings" + "testing" + "time" +) + +type MyTestHandler struct { + t *testing.T + q *Consumer + messagesSent int + messagesReceived int + messagesFailed int +} + +var nullLogger = log.New(ioutil.Discard, "", log.LstdFlags) + +func (h *MyTestHandler) LogFailedMessage(message *Message) { + h.messagesFailed++ + h.q.Stop() +} + +func (h *MyTestHandler) HandleMessage(message *Message) error { + if string(message.Body) == "TOBEFAILED" { + h.messagesReceived++ + return errors.New("fail this message") + } + + data := struct { + Msg string + }{} + + err := 
json.Unmarshal(message.Body, &data) + if err != nil { + return err + } + + msg := data.Msg + if msg != "single" && msg != "double" { + h.t.Error("message 'action' was not correct: ", msg, data) + } + h.messagesReceived++ + return nil +} + +func SendMessage(t *testing.T, port int, topic string, method string, body []byte) { + httpclient := &http.Client{} + endpoint := fmt.Sprintf("http://127.0.0.1:%d/%s?topic=%s", port, method, topic) + req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(body)) + resp, err := httpclient.Do(req) + if err != nil { + t.Fatalf(err.Error()) + return + } + resp.Body.Close() +} + +func TestConsumer(t *testing.T) { + consumerTest(t, nil) +} + +func TestConsumerTLS(t *testing.T) { + consumerTest(t, func(c *Config) { + c.TlsV1 = true + c.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + }) +} + +func TestConsumerDeflate(t *testing.T) { + consumerTest(t, func(c *Config) { + c.Deflate = true + }) +} + +func TestConsumerSnappy(t *testing.T) { + consumerTest(t, func(c *Config) { + c.Snappy = true + }) +} + +func TestConsumerTLSDeflate(t *testing.T) { + consumerTest(t, func(c *Config) { + c.TlsV1 = true + c.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + c.Deflate = true + }) +} + +func TestConsumerTLSSnappy(t *testing.T) { + consumerTest(t, func(c *Config) { + c.TlsV1 = true + c.TlsConfig = &tls.Config{ + InsecureSkipVerify: true, + } + c.Snappy = true + }) +} + +func TestConsumerTLSClientCert(t *testing.T) { + envDl := os.Getenv("NSQ_DOWNLOAD") + if strings.HasPrefix(envDl, "nsq-0.2.24") || strings.HasPrefix(envDl, "nsq-0.2.27") { + t.Log("skipping due to older nsqd") + return + } + cert, _ := tls.LoadX509KeyPair("./test/client.pem", "./test/client.key") + consumerTest(t, func(c *Config) { + c.TlsV1 = true + c.TlsConfig = &tls.Config{ + Certificates: []tls.Certificate{cert}, + InsecureSkipVerify: true, + } + }) +} + +func TestConsumerTLSClientCertViaSet(t *testing.T) { + envDl := os.Getenv("NSQ_DOWNLOAD") + if strings.HasPrefix(envDl, "nsq-0.2.24") || strings.HasPrefix(envDl, "nsq-0.2.27") { + t.Log("skipping due to older nsqd") + return + } + consumerTest(t, func(c *Config) { + c.Set("tls_v1", true) + c.Set("tls_cert", "./test/client.pem") + c.Set("tls_key", "./test/client.key") + c.Set("tls_insecure_skip_verify", true) + }) +} + +func consumerTest(t *testing.T, cb func(c *Config)) { + config := NewConfig() + laddr := "127.0.0.1" + // so that the test can simulate binding consumer to specified address + config.LocalAddr, _ = net.ResolveTCPAddr("tcp", laddr+":0") + // so that the test can simulate reaching max requeues and a call to LogFailedMessage + config.DefaultRequeueDelay = 0 + // so that the test wont timeout from backing off + config.MaxBackoffDuration = time.Millisecond * 50 + if cb != nil { + cb(config) + } + topicName := "rdr_test" + if config.Deflate { + topicName = topicName + "_deflate" + } else if config.Snappy { + topicName = topicName + "_snappy" + } + if config.TlsV1 { + topicName = topicName + "_tls" + } + topicName = topicName + strconv.Itoa(int(time.Now().Unix())) + q, _ := NewConsumer(topicName, "ch", config) + // q.SetLogger(nullLogger, LogLevelInfo) + + h := &MyTestHandler{ + t: t, + q: q, + } + q.AddHandler(h) + + SendMessage(t, 4151, topicName, "put", []byte(`{"msg":"single"}`)) + SendMessage(t, 4151, topicName, "mput", []byte("{\"msg\":\"double\"}\n{\"msg\":\"double\"}")) + SendMessage(t, 4151, topicName, "put", []byte("TOBEFAILED")) + h.messagesSent = 4 + + addr := "127.0.0.1:4150" + err := q.ConnectToNSQD(addr) 
+ if err != nil { + t.Fatal(err) + } + + stats := q.Stats() + if stats.Connections == 0 { + t.Fatal("stats report 0 connections (should be > 0)") + } + + err = q.ConnectToNSQD(addr) + if err == nil { + t.Fatal("should not be able to connect to the same NSQ twice") + } + + conn := q.conns()[0] + if !strings.HasPrefix(conn.conn.LocalAddr().String(), laddr) { + t.Fatal("connection should be bound to the specified address:", conn.conn.LocalAddr()) + } + + err = q.DisconnectFromNSQD("1.2.3.4:4150") + if err == nil { + t.Fatal("should not be able to disconnect from an unknown nsqd") + } + + err = q.ConnectToNSQD("1.2.3.4:4150") + if err == nil { + t.Fatal("should not be able to connect to non-existent nsqd") + } + + err = q.DisconnectFromNSQD("1.2.3.4:4150") + if err != nil { + t.Fatal("should be able to disconnect from an nsqd - " + err.Error()) + } + + <-q.StopChan + + stats = q.Stats() + if stats.Connections != 0 { + t.Fatalf("stats report %d active connections (should be 0)", stats.Connections) + } + + stats = q.Stats() + if stats.MessagesReceived != uint64(h.messagesReceived+h.messagesFailed) { + t.Fatalf("stats report %d messages received (should be %d)", + stats.MessagesReceived, + h.messagesReceived+h.messagesFailed) + } + + if h.messagesReceived != 8 || h.messagesSent != 4 { + t.Fatalf("end of test. should have handled a diff number of messages (got %d, sent %d)", h.messagesReceived, h.messagesSent) + } + if h.messagesFailed != 1 { + t.Fatal("failed message not done") + } +} diff --git a/vendor/github.com/nsqio/go-nsq/mock_test.go b/vendor/github.com/nsqio/go-nsq/mock_test.go new file mode 100644 index 0000000000..62b9fc0016 --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/mock_test.go @@ -0,0 +1,472 @@ +package nsq + +import ( + "bufio" + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "log" + "net" + "strconv" + "testing" + "time" +) + +type tbLog interface { + Log(...interface{}) +} + +type testLogger struct { + tbLog +} + +func (tl *testLogger) Output(maxdepth int, s string) error { + tl.Log(s) + return nil +} + +func newTestLogger(tbl tbLog) logger { + return &testLogger{tbl} +} + +type instruction struct { + delay time.Duration + frameType int32 + body []byte +} + +type mockNSQD struct { + script []instruction + got [][]byte + tcpAddr *net.TCPAddr + tcpListener net.Listener + exitChan chan int +} + +func newMockNSQD(script []instruction, addr string) *mockNSQD { + n := &mockNSQD{ + script: script, + exitChan: make(chan int), + } + + tcpListener, err := net.Listen("tcp", addr) + if err != nil { + log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) + } + n.tcpListener = tcpListener + n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) + + go n.listen() + + return n +} + +func (n *mockNSQD) listen() { + log.Printf("TCP: listening on %s", n.tcpListener.Addr().String()) + + for { + conn, err := n.tcpListener.Accept() + if err != nil { + break + } + go n.handle(conn) + } + + log.Printf("TCP: closing %s", n.tcpListener.Addr().String()) + close(n.exitChan) +} + +func (n *mockNSQD) handle(conn net.Conn) { + var idx int + + log.Printf("TCP: new client(%s)", conn.RemoteAddr()) + + buf := make([]byte, 4) + _, err := io.ReadFull(conn, buf) + if err != nil { + log.Fatalf("ERROR: failed to read protocol version - %s", err) + } + + readChan := make(chan []byte) + readDoneChan := make(chan int) + scriptTime := time.After(n.script[0].delay) + rdr := bufio.NewReader(conn) + + go func() { + for { + line, err := rdr.ReadBytes('\n') + if err != nil { + return + } + // trim the 
'\n' + line = line[:len(line)-1] + readChan <- line + <-readDoneChan + } + }() + + var rdyCount int + for idx < len(n.script) { + select { + case line := <-readChan: + log.Printf("mock: %s", line) + n.got = append(n.got, line) + params := bytes.Split(line, []byte(" ")) + switch { + case bytes.Equal(params[0], []byte("IDENTIFY")): + l := make([]byte, 4) + _, err := io.ReadFull(rdr, l) + if err != nil { + log.Printf(err.Error()) + goto exit + } + size := int32(binary.BigEndian.Uint32(l)) + b := make([]byte, size) + _, err = io.ReadFull(rdr, b) + if err != nil { + log.Printf(err.Error()) + goto exit + } + log.Printf("%s", b) + case bytes.Equal(params[0], []byte("RDY")): + rdy, _ := strconv.Atoi(string(params[1])) + rdyCount = rdy + case bytes.Equal(params[0], []byte("FIN")): + case bytes.Equal(params[0], []byte("REQ")): + } + readDoneChan <- 1 + case <-scriptTime: + inst := n.script[idx] + if bytes.Equal(inst.body, []byte("exit")) { + goto exit + } + if inst.frameType == FrameTypeMessage { + if rdyCount == 0 { + log.Printf("!!! RDY == 0") + scriptTime = time.After(n.script[idx+1].delay) + continue + } + rdyCount-- + } + _, err := conn.Write(framedResponse(inst.frameType, inst.body)) + if err != nil { + log.Printf(err.Error()) + goto exit + } + scriptTime = time.After(n.script[idx+1].delay) + idx++ + } + } + +exit: + n.tcpListener.Close() + conn.Close() +} + +func framedResponse(frameType int32, data []byte) []byte { + var w bytes.Buffer + + beBuf := make([]byte, 4) + size := uint32(len(data)) + 4 + + binary.BigEndian.PutUint32(beBuf, size) + _, err := w.Write(beBuf) + if err != nil { + return nil + } + + binary.BigEndian.PutUint32(beBuf, uint32(frameType)) + _, err = w.Write(beBuf) + if err != nil { + return nil + } + + _, err = w.Write(data) + return w.Bytes() +} + +type testHandler struct{} + +func (h *testHandler) HandleMessage(message *Message) error { + switch string(message.Body) { + case "requeue": + message.Requeue(-1) + return nil + case "requeue_no_backoff_1": + if message.Attempts > 1 { + return nil + } + message.RequeueWithoutBackoff(-1) + return nil + case "bad": + return errors.New("bad") + } + return nil +} + +func frameMessage(m *Message) []byte { + var b bytes.Buffer + m.WriteTo(&b) + return b.Bytes() +} + +func TestConsumerBackoff(t *testing.T) { + msgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + msgGood := NewMessage(msgIDGood, []byte("good")) + + msgIDBad := MessageID{'z', 'x', 'c', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + msgBad := NewMessage(msgIDBad, []byte("bad")) + + script := []instruction{ + // SUB + instruction{0, FrameTypeResponse, []byte("OK")}, + // IDENTIFY + instruction{0, FrameTypeResponse, []byte("OK")}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgBad)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgBad)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + // needed to exit test + instruction{200 * time.Millisecond, -1, []byte("exit")}, + } + + addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:0") + n := newMockNSQD(script, addr.String()) + + topicName := "test_consumer_commands" + 
strconv.Itoa(int(time.Now().Unix())) + config := NewConfig() + config.MaxInFlight = 5 + config.BackoffMultiplier = 10 * time.Millisecond + q, _ := NewConsumer(topicName, "ch", config) + q.SetLogger(newTestLogger(t), LogLevelDebug) + q.AddHandler(&testHandler{}) + err := q.ConnectToNSQD(n.tcpAddr.String()) + if err != nil { + t.Fatalf(err.Error()) + } + + <-n.exitChan + + for i, r := range n.got { + log.Printf("%d: %s", i, r) + } + + expected := []string{ + "IDENTIFY", + "SUB " + topicName + " ch", + "RDY 5", + fmt.Sprintf("FIN %s", msgIDGood), + fmt.Sprintf("FIN %s", msgIDGood), + fmt.Sprintf("FIN %s", msgIDGood), + "RDY 5", + "RDY 0", + fmt.Sprintf("REQ %s 0", msgIDBad), + "RDY 1", + "RDY 0", + fmt.Sprintf("REQ %s 0", msgIDBad), + "RDY 1", + "RDY 0", + fmt.Sprintf("FIN %s", msgIDGood), + "RDY 1", + "RDY 5", + fmt.Sprintf("FIN %s", msgIDGood), + } + if len(n.got) != len(expected) { + t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) + } + for i, r := range n.got { + if string(r) != expected[i] { + t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) + } + } +} + +func TestConsumerRequeueNoBackoff(t *testing.T) { + msgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + msgIDRequeue := MessageID{'r', 'e', 'q', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + msgIDRequeueNoBackoff := MessageID{'r', 'e', 'q', 'n', 'b', 'a', 'c', 'k', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + + msgGood := NewMessage(msgIDGood, []byte("good")) + msgRequeue := NewMessage(msgIDRequeue, []byte("requeue")) + msgRequeueNoBackoff := NewMessage(msgIDRequeueNoBackoff, []byte("requeue_no_backoff_1")) + + script := []instruction{ + // SUB + instruction{0, FrameTypeResponse, []byte("OK")}, + // IDENTIFY + instruction{0, FrameTypeResponse, []byte("OK")}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeueNoBackoff)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + // needed to exit test + instruction{100 * time.Millisecond, -1, []byte("exit")}, + } + + addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:0") + n := newMockNSQD(script, addr.String()) + + topicName := "test_requeue" + strconv.Itoa(int(time.Now().Unix())) + config := NewConfig() + config.MaxInFlight = 1 + config.BackoffMultiplier = 10 * time.Millisecond + q, _ := NewConsumer(topicName, "ch", config) + q.SetLogger(newTestLogger(t), LogLevelDebug) + q.AddHandler(&testHandler{}) + err := q.ConnectToNSQD(n.tcpAddr.String()) + if err != nil { + t.Fatalf(err.Error()) + } + + select { + case <-n.exitChan: + log.Printf("clean exit") + case <-time.After(500 * time.Millisecond): + log.Printf("timeout") + } + + for i, r := range n.got { + log.Printf("%d: %s", i, r) + } + + expected := []string{ + "IDENTIFY", + "SUB " + topicName + " ch", + "RDY 1", + "RDY 1", + "RDY 0", + fmt.Sprintf("REQ %s 0", msgIDRequeue), + "RDY 1", + "RDY 0", + fmt.Sprintf("REQ %s 0", msgIDRequeueNoBackoff), + "RDY 1", + "RDY 1", + fmt.Sprintf("FIN %s", msgIDGood), + } + if len(n.got) != len(expected) { + t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) + } + for i, r := range n.got { + if string(r) != expected[i] { + t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) + } + } +} + +func TestConsumerBackoffDisconnect(t *testing.T) { + msgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + msgIDRequeue := 
MessageID{'r', 'e', 'q', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} + + msgGood := NewMessage(msgIDGood, []byte("good")) + msgRequeue := NewMessage(msgIDRequeue, []byte("requeue")) + + script := []instruction{ + // SUB + instruction{0, FrameTypeResponse, []byte("OK")}, + // IDENTIFY + instruction{0, FrameTypeResponse, []byte("OK")}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + // needed to exit test + instruction{100 * time.Millisecond, -1, []byte("exit")}, + } + + addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:0") + n := newMockNSQD(script, addr.String()) + + topicName := "test_requeue" + strconv.Itoa(int(time.Now().Unix())) + config := NewConfig() + config.MaxInFlight = 5 + config.BackoffMultiplier = 10 * time.Millisecond + config.LookupdPollInterval = 10 * time.Millisecond + config.RDYRedistributeInterval = 10 * time.Millisecond + q, _ := NewConsumer(topicName, "ch", config) + q.SetLogger(newTestLogger(t), LogLevelDebug) + q.AddHandler(&testHandler{}) + err := q.ConnectToNSQD(n.tcpAddr.String()) + if err != nil { + t.Fatalf(err.Error()) + } + + select { + case <-n.exitChan: + log.Printf("clean exit") + case <-time.After(500 * time.Millisecond): + log.Printf("timeout") + } + + for i, r := range n.got { + log.Printf("%d: %s", i, r) + } + + expected := []string{ + "IDENTIFY", + "SUB " + topicName + " ch", + "RDY 5", + fmt.Sprintf("FIN %s", msgIDGood), + "RDY 0", + fmt.Sprintf("REQ %s 0", msgIDRequeue), + "RDY 1", + "RDY 0", + fmt.Sprintf("REQ %s 0", msgIDRequeue), + "RDY 1", + "RDY 0", + fmt.Sprintf("FIN %s", msgIDGood), + "RDY 1", + } + if len(n.got) != len(expected) { + t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) + } + for i, r := range n.got { + if string(r) != expected[i] { + t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) + } + } + + script = []instruction{ + // SUB + instruction{0, FrameTypeResponse, []byte("OK")}, + // IDENTIFY + instruction{0, FrameTypeResponse, []byte("OK")}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, + // needed to exit test + instruction{100 * time.Millisecond, -1, []byte("exit")}, + } + + n = newMockNSQD(script, n.tcpAddr.String()) + + select { + case <-n.exitChan: + log.Printf("clean exit") + case <-time.After(500 * time.Millisecond): + log.Printf("timeout") + } + + for i, r := range n.got { + log.Printf("%d: %s", i, r) + } + + expected = []string{ + "IDENTIFY", + "SUB " + topicName + " ch", + "RDY 1", + "RDY 5", + fmt.Sprintf("FIN %s", msgIDGood), + fmt.Sprintf("FIN %s", msgIDGood), + } + if len(n.got) != len(expected) { + t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) + } + for i, r := range n.got { + if string(r) != expected[i] { + t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) + } + } +} diff --git a/vendor/github.com/nsqio/go-nsq/producer_test.go b/vendor/github.com/nsqio/go-nsq/producer_test.go new file mode 100755 index 0000000000..9be290068d --- /dev/null +++ b/vendor/github.com/nsqio/go-nsq/producer_test.go @@ -0,0 +1,366 @@ +package nsq + +import ( + "bytes" + "errors" + "io/ioutil" + "log" + "net" + "os" + "runtime" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" +) + 
+type ConsumerHandler struct { + t *testing.T + q *Consumer + messagesGood int + messagesFailed int +} + +func (h *ConsumerHandler) LogFailedMessage(message *Message) { + h.messagesFailed++ + h.q.Stop() +} + +func (h *ConsumerHandler) HandleMessage(message *Message) error { + msg := string(message.Body) + if msg == "bad_test_case" { + return errors.New("fail this message") + } + if msg != "multipublish_test_case" && msg != "publish_test_case" { + h.t.Error("message 'action' was not correct:", msg) + } + h.messagesGood++ + return nil +} + +func TestProducerConnection(t *testing.T) { + config := NewConfig() + laddr := "127.0.0.1" + + config.LocalAddr, _ = net.ResolveTCPAddr("tcp", laddr+":0") + + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + + err := w.Publish("write_test", []byte("test")) + if err != nil { + t.Fatalf("should lazily connect - %s", err) + } + + w.Stop() + + err = w.Publish("write_test", []byte("fail test")) + if err != ErrStopped { + t.Fatalf("should not be able to write after Stop()") + } +} + +func TestProducerPing(t *testing.T) { + log.SetOutput(ioutil.Discard) + defer log.SetOutput(os.Stdout) + + config := NewConfig() + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + + err := w.Ping() + + if err != nil { + t.Fatalf("should connect on ping") + } + + w.Stop() + + err = w.Ping() + if err != ErrStopped { + t.Fatalf("should not be able to ping after Stop()") + } +} + +func TestProducerPublish(t *testing.T) { + topicName := "publish" + strconv.Itoa(int(time.Now().Unix())) + msgCount := 10 + + config := NewConfig() + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + defer w.Stop() + + for i := 0; i < msgCount; i++ { + err := w.Publish(topicName, []byte("publish_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + } + + err := w.Publish(topicName, []byte("bad_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + + readMessages(topicName, t, msgCount) +} + +func TestProducerMultiPublish(t *testing.T) { + topicName := "multi_publish" + strconv.Itoa(int(time.Now().Unix())) + msgCount := 10 + + config := NewConfig() + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + defer w.Stop() + + var testData [][]byte + for i := 0; i < msgCount; i++ { + testData = append(testData, []byte("multipublish_test_case")) + } + + err := w.MultiPublish(topicName, testData) + if err != nil { + t.Fatalf("error %s", err) + } + + err = w.Publish(topicName, []byte("bad_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + + readMessages(topicName, t, msgCount) +} + +func TestProducerPublishAsync(t *testing.T) { + topicName := "async_publish" + strconv.Itoa(int(time.Now().Unix())) + msgCount := 10 + + config := NewConfig() + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + defer w.Stop() + + responseChan := make(chan *ProducerTransaction, msgCount) + for i := 0; i < msgCount; i++ { + err := w.PublishAsync(topicName, []byte("publish_test_case"), responseChan, "test") + if err != nil { + t.Fatalf(err.Error()) + } + } + + for i := 0; i < msgCount; i++ { + trans := <-responseChan + if trans.Error != nil { + t.Fatalf(trans.Error.Error()) + } + if trans.Args[0].(string) != "test" { + t.Fatalf(`proxied arg "%s" != "test"`, trans.Args[0].(string)) + } + } + + err := w.Publish(topicName, []byte("bad_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + + readMessages(topicName, t, 
msgCount) +} + +func TestProducerMultiPublishAsync(t *testing.T) { + topicName := "multi_publish" + strconv.Itoa(int(time.Now().Unix())) + msgCount := 10 + + config := NewConfig() + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + defer w.Stop() + + var testData [][]byte + for i := 0; i < msgCount; i++ { + testData = append(testData, []byte("multipublish_test_case")) + } + + responseChan := make(chan *ProducerTransaction) + err := w.MultiPublishAsync(topicName, testData, responseChan, "test0", 1) + if err != nil { + t.Fatalf(err.Error()) + } + + trans := <-responseChan + if trans.Error != nil { + t.Fatalf(trans.Error.Error()) + } + if trans.Args[0].(string) != "test0" { + t.Fatalf(`proxied arg "%s" != "test0"`, trans.Args[0].(string)) + } + if trans.Args[1].(int) != 1 { + t.Fatalf(`proxied arg %d != 1`, trans.Args[1].(int)) + } + + err = w.Publish(topicName, []byte("bad_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + + readMessages(topicName, t, msgCount) +} + +func TestProducerHeartbeat(t *testing.T) { + topicName := "heartbeat" + strconv.Itoa(int(time.Now().Unix())) + + config := NewConfig() + config.HeartbeatInterval = 100 * time.Millisecond + w, _ := NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + defer w.Stop() + + err := w.Publish(topicName, []byte("publish_test_case")) + if err == nil { + t.Fatalf("error should not be nil") + } + if identifyError, ok := err.(ErrIdentify); !ok || + identifyError.Reason != "E_BAD_BODY IDENTIFY heartbeat interval (100) is invalid" { + t.Fatalf("wrong error - %s", err) + } + + config = NewConfig() + config.HeartbeatInterval = 1000 * time.Millisecond + w, _ = NewProducer("127.0.0.1:4150", config) + w.SetLogger(nullLogger, LogLevelInfo) + defer w.Stop() + + err = w.Publish(topicName, []byte("publish_test_case")) + if err != nil { + t.Fatalf(err.Error()) + } + + time.Sleep(1100 * time.Millisecond) + + msgCount := 10 + for i := 0; i < msgCount; i++ { + err := w.Publish(topicName, []byte("publish_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + } + + err = w.Publish(topicName, []byte("bad_test_case")) + if err != nil { + t.Fatalf("error %s", err) + } + + readMessages(topicName, t, msgCount+1) +} + +func readMessages(topicName string, t *testing.T, msgCount int) { + config := NewConfig() + config.DefaultRequeueDelay = 0 + config.MaxBackoffDuration = 50 * time.Millisecond + q, _ := NewConsumer(topicName, "ch", config) + q.SetLogger(nullLogger, LogLevelInfo) + + h := &ConsumerHandler{ + t: t, + q: q, + } + q.AddHandler(h) + + err := q.ConnectToNSQD("127.0.0.1:4150") + if err != nil { + t.Fatalf(err.Error()) + } + <-q.StopChan + + if h.messagesGood != msgCount { + t.Fatalf("end of test. 
should have handled a diff number of messages %d != %d", h.messagesGood, msgCount) + } + + if h.messagesFailed != 1 { + t.Fatal("failed message not done") + } +} + +type mockProducerConn struct { + delegate ConnDelegate + closeCh chan struct{} + pubCh chan struct{} +} + +func newMockProducerConn(delegate ConnDelegate) producerConn { + m := &mockProducerConn{ + delegate: delegate, + closeCh: make(chan struct{}), + pubCh: make(chan struct{}, 4), + } + go m.router() + return m +} + +func (m *mockProducerConn) String() string { + return "127.0.0.1:0" +} + +func (m *mockProducerConn) SetLogger(logger logger, level LogLevel, prefix string) {} + +func (m *mockProducerConn) Connect() (*IdentifyResponse, error) { + return &IdentifyResponse{}, nil +} + +func (m *mockProducerConn) Close() error { + close(m.closeCh) + return nil +} + +func (m *mockProducerConn) WriteCommand(cmd *Command) error { + if bytes.Equal(cmd.Name, []byte("PUB")) { + m.pubCh <- struct{}{} + } + return nil +} + +func (m *mockProducerConn) router() { + for { + select { + case <-m.closeCh: + goto exit + case <-m.pubCh: + m.delegate.OnResponse(nil, framedResponse(FrameTypeResponse, []byte("OK"))) + } + } +exit: +} + +func BenchmarkProducer(b *testing.B) { + b.StopTimer() + body := make([]byte, 512) + + config := NewConfig() + p, _ := NewProducer("127.0.0.1:0", config) + + p.conn = newMockProducerConn(&producerConnDelegate{p}) + atomic.StoreInt32(&p.state, StateConnected) + p.closeChan = make(chan int) + go p.router() + + startCh := make(chan struct{}) + var wg sync.WaitGroup + parallel := runtime.GOMAXPROCS(0) + + for j := 0; j < parallel; j++ { + wg.Add(1) + go func() { + <-startCh + for i := 0; i < b.N/parallel; i++ { + p.Publish("test", body) + } + wg.Done() + }() + } + + b.StartTimer() + close(startCh) + wg.Wait() +} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore new file mode 100644 index 0000000000..565f0f7322 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.gitignore @@ -0,0 +1,13 @@ +# IntelliJ project files +.idea/ +opentracing-go.iml +opentracing-go.ipr +opentracing-go.iws + +# Test results +*.cov +*.html +test.log + +# Build dir +build/ diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml new file mode 100644 index 0000000000..0538f1bfc0 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +install: + - go get -d -t github.com/opentracing/opentracing-go/... + - go get -u github.com/golang/lint/... +script: + - make test lint + - go build ./... 
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags_test.go b/vendor/github.com/opentracing/opentracing-go/ext/tags_test.go new file mode 100644 index 0000000000..ea9af335c7 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/ext/tags_test.go @@ -0,0 +1,148 @@ +package ext_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/mocktracer" +) + +func TestPeerTags(t *testing.T) { + if ext.PeerService != "peer.service" { + t.Fatalf("Invalid PeerService %v", ext.PeerService) + } + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace") + ext.PeerService.Set(span, "my-service") + ext.PeerAddress.Set(span, "my-hostname:8080") + ext.PeerHostname.Set(span, "my-hostname") + ext.PeerHostIPv4.Set(span, uint32(127<<24|1)) + ext.PeerHostIPv6.Set(span, "::") + ext.PeerPort.Set(span, uint16(8080)) + ext.SamplingPriority.Set(span, uint16(1)) + ext.SpanKind.Set(span, ext.SpanKindRPCServerEnum) + ext.SpanKindRPCClient.Set(span) + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "peer.service": "my-service", + "peer.address": "my-hostname:8080", + "peer.hostname": "my-hostname", + "peer.ipv4": uint32(127<<24 | 1), + "peer.ipv6": "::", + "peer.port": uint16(8080), + "span.kind": ext.SpanKindRPCClientEnum, + }, rawSpan.Tags()) + assert.True(t, span.Context().(mocktracer.MockSpanContext).Sampled) + ext.SamplingPriority.Set(span, uint16(0)) + assert.False(t, span.Context().(mocktracer.MockSpanContext).Sampled) +} + +func TestHTTPTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindRPCServer) + ext.HTTPUrl.Set(span, "test.biz/uri?protocol=false") + ext.HTTPMethod.Set(span, "GET") + ext.HTTPStatusCode.Set(span, 301) + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "http.url": "test.biz/uri?protocol=false", + "http.method": "GET", + "http.status_code": uint16(301), + "span.kind": ext.SpanKindRPCServerEnum, + }, rawSpan.Tags()) +} + +func TestDBTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindRPCClient) + ext.DBInstance.Set(span, "127.0.0.1:3306/customers") + ext.DBStatement.Set(span, "SELECT * FROM user_table") + ext.DBType.Set(span, "sql") + ext.DBUser.Set(span, "customer_user") + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "db.instance": "127.0.0.1:3306/customers", + "db.statement": "SELECT * FROM user_table", + "db.type": "sql", + "db.user": "customer_user", + "span.kind": ext.SpanKindRPCClientEnum, + }, rawSpan.Tags()) +} + +func TestMiscTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace") + ext.Component.Set(span, "my-awesome-library") + ext.SamplingPriority.Set(span, 1) + ext.Error.Set(span, true) + + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "component": "my-awesome-library", + "error": true, + }, rawSpan.Tags()) +} + +func TestRPCServerOption(t *testing.T) { + tracer := mocktracer.New() + parent := tracer.StartSpan("my-trace") + parent.SetBaggageItem("bag", "gage") + + carrier := opentracing.HTTPHeadersCarrier{} + err := tracer.Inject(parent.Context(), opentracing.HTTPHeaders, carrier) + if err != nil { + t.Fatal(err) + } + + parCtx, err := tracer.Extract(opentracing.HTTPHeaders, 
carrier) + if err != nil { + t.Fatal(err) + } + + tracer.StartSpan("my-child", ext.RPCServerOption(parCtx)).Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "span.kind": ext.SpanKindRPCServerEnum, + }, rawSpan.Tags()) + assert.Equal(t, map[string]string{ + "bag": "gage", + }, rawSpan.Context().(mocktracer.MockSpanContext).Baggage) +} + +func TestMessageBusProducerTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindProducer) + ext.MessageBusDestination.Set(span, "topic name") + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "message_bus.destination": "topic name", + "span.kind": ext.SpanKindProducerEnum, + }, rawSpan.Tags()) +} + +func TestMessageBusConsumerTags(t *testing.T) { + tracer := mocktracer.New() + span := tracer.StartSpan("my-trace", ext.SpanKindConsumer) + ext.MessageBusDestination.Set(span, "topic name") + span.Finish() + + rawSpan := tracer.FinishedSpans()[0] + assert.Equal(t, map[string]interface{}{ + "message_bus.destination": "topic name", + "span.kind": ext.SpanKindConsumerEnum, + }, rawSpan.Tags()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext_test.go b/vendor/github.com/opentracing/opentracing-go/gocontext_test.go new file mode 100644 index 0000000000..65c0130861 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext_test.go @@ -0,0 +1,81 @@ +package opentracing + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestContextWithSpan(t *testing.T) { + span := &noopSpan{} + ctx := ContextWithSpan(context.Background(), span) + span2 := SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } + + ctx = context.Background() + span2 = SpanFromContext(ctx) + if span2 != nil { + t.Errorf("Expected nil span, found %+v", span2) + } + + ctx = ContextWithSpan(ctx, span) + span2 = SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } +} + +func TestStartSpanFromContext(t *testing.T) { + testTracer := testTracer{} + + // Test the case where there *is* a Span in the Context. + { + parentSpan := &testSpan{} + parentCtx := ContextWithSpan(context.Background(), parentSpan) + childSpan, childCtx := startSpanFromContextWithTracer(parentCtx, testTracer, "child") + if !childSpan.Context().(testSpanContext).HasParent { + t.Errorf("Failed to find parent: %v", childSpan) + } + if !childSpan.(testSpan).Equal(SpanFromContext(childCtx)) { + t.Errorf("Unable to find child span in context: %v", childCtx) + } + } + + // Test the case where there *is not* a Span in the Context. 
+ { + emptyCtx := context.Background() + childSpan, childCtx := startSpanFromContextWithTracer(emptyCtx, testTracer, "child") + if childSpan.Context().(testSpanContext).HasParent { + t.Errorf("Should not have found parent: %v", childSpan) + } + if !childSpan.(testSpan).Equal(SpanFromContext(childCtx)) { + t.Errorf("Unable to find child span in context: %v", childCtx) + } + } +} + +func TestStartSpanFromContextOptions(t *testing.T) { + testTracer := testTracer{} + + // Test options are passed to tracer + + startTime := time.Now().Add(-10 * time.Second) // ten seconds ago + span, ctx := startSpanFromContextWithTracer( + context.Background(), testTracer, "parent", StartTime(startTime), Tag{"component", "test"}) + + assert.Equal(t, "test", span.(testSpan).Tags["component"]) + assert.Equal(t, startTime, span.(testSpan).StartTime) + + // Test it also works for a child span + + childStartTime := startTime.Add(3 * time.Second) + childSpan, _ := startSpanFromContextWithTracer( + ctx, testTracer, "child", StartTime(childStartTime)) + + assert.Equal(t, childSpan.(testSpan).Tags["component"], nil) + assert.Equal(t, childSpan.(testSpan).StartTime, childStartTime) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field_test.go b/vendor/github.com/opentracing/opentracing-go/log/field_test.go new file mode 100644 index 0000000000..73ab172d30 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field_test.go @@ -0,0 +1,51 @@ +package log + +import ( + "fmt" + "testing" +) + +func TestFieldString(t *testing.T) { + testCases := []struct { + field Field + expected string + }{ + { + field: String("key", "value"), + expected: "key:value", + }, + { + field: Bool("key", true), + expected: "key:true", + }, + { + field: Int("key", 5), + expected: "key:5", + }, + { + field: Error(fmt.Errorf("err msg")), + expected: "error:err msg", + }, + { + field: Error(nil), + expected: "error:", + }, + { + field: Noop(), + expected: ":", + }, + } + for i, tc := range testCases { + if str := tc.field.String(); str != tc.expected { + t.Errorf("%d: expected '%s', got '%s'", i, tc.expected, str) + } + } +} + +func TestNoopDoesNotMarshal(t *testing.T) { + mockEncoder := struct { + Encoder + }{} + f := Noop() + f.Marshal(mockEncoder) // panics if any Encoder method is invoked +} diff --git a/vendor/github.com/opentracing/opentracing-go/options_test.go b/vendor/github.com/opentracing/opentracing-go/options_test.go new file mode 100644 index 0000000000..56a543bfe5 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/options_test.go @@ -0,0 +1,31 @@ +package opentracing + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestChildOfAndFollowsFrom(t *testing.T) { + tests := []struct { + newOpt func(SpanContext) SpanReference + refType SpanReferenceType + name string + }{ + {ChildOf, ChildOfRef, "ChildOf"}, + {FollowsFrom, FollowsFromRef, "FollowsFrom"}, + } + + for _, test := range tests { + opts := new(StartSpanOptions) + + test.newOpt(nil).Apply(opts) + require.Nil(t, opts.References, "%s(nil) must not append a reference", test.name) + + ctx := new(noopSpanContext) + test.newOpt(ctx).Apply(opts) + require.Equal(t, []SpanReference{ + SpanReference{ReferencedContext: ctx, Type: test.refType}, + }, opts.References, "%s(ctx) must append a reference", test.name) + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation_test.go b/vendor/github.com/opentracing/opentracing-go/propagation_test.go new file mode 100644 index 0000000000..e3dad55978 --- /dev/null 
+++ b/vendor/github.com/opentracing/opentracing-go/propagation_test.go @@ -0,0 +1,93 @@ +package opentracing + +import ( + "net/http" + "strconv" + "testing" +) + +const testHeaderPrefix = "testprefix-" + +func TestTextMapCarrierInject(t *testing.T) { + m := make(map[string]string) + m["NotOT"] = "blah" + m["opname"] = "AlsoNotOT" + tracer := testTracer{} + span := tracer.StartSpan("someSpan") + fakeID := span.Context().(testSpanContext).FakeID + + carrier := TextMapCarrier(m) + if err := span.Tracer().Inject(span.Context(), TextMap, carrier); err != nil { + t.Fatal(err) + } + + if len(m) != 3 { + t.Errorf("Unexpected header length: %v", len(m)) + } + // The prefix comes from just above; the suffix comes from + // testTracer.Inject(). + if m["testprefix-fakeid"] != strconv.Itoa(fakeID) { + t.Errorf("Could not find fakeid at expected key") + } +} + +func TestTextMapCarrierExtract(t *testing.T) { + m := make(map[string]string) + m["NotOT"] = "blah" + m["opname"] = "AlsoNotOT" + m["testprefix-fakeid"] = "42" + tracer := testTracer{} + + carrier := TextMapCarrier(m) + extractedContext, err := tracer.Extract(TextMap, carrier) + if err != nil { + t.Fatal(err) + } + + if extractedContext.(testSpanContext).FakeID != 42 { + t.Errorf("Failed to read testprefix-fakeid correctly") + } +} + +func TestHTTPHeaderInject(t *testing.T) { + h := http.Header{} + h.Add("NotOT", "blah") + h.Add("opname", "AlsoNotOT") + tracer := testTracer{} + span := tracer.StartSpan("someSpan") + fakeID := span.Context().(testSpanContext).FakeID + + // Use HTTPHeadersCarrier to wrap around `h`. + carrier := HTTPHeadersCarrier(h) + if err := span.Tracer().Inject(span.Context(), HTTPHeaders, carrier); err != nil { + t.Fatal(err) + } + + if len(h) != 3 { + t.Errorf("Unexpected header length: %v", len(h)) + } + // The prefix comes from just above; the suffix comes from + // testTracer.Inject(). + if h.Get("testprefix-fakeid") != strconv.Itoa(fakeID) { + t.Errorf("Could not find fakeid at expected key") + } +} + +func TestHTTPHeaderExtract(t *testing.T) { + h := http.Header{} + h.Add("NotOT", "blah") + h.Add("opname", "AlsoNotOT") + h.Add("testprefix-fakeid", "42") + tracer := testTracer{} + + // Use HTTPHeadersCarrier to wrap around `h`. + carrier := HTTPHeadersCarrier(h) + spanContext, err := tracer.Extract(HTTPHeaders, carrier) + if err != nil { + t.Fatal(err) + } + + if spanContext.(testSpanContext).FakeID != 42 { + t.Errorf("Failed to read testprefix-fakeid correctly") + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/testtracer_test.go b/vendor/github.com/opentracing/opentracing-go/testtracer_test.go new file mode 100644 index 0000000000..dd13788cf0 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/testtracer_test.go @@ -0,0 +1,138 @@ +package opentracing + +import ( + "strconv" + "strings" + "time" + + "github.com/opentracing/opentracing-go/log" +) + +const testHTTPHeaderPrefix = "testprefix-" + +// testTracer is a most-noop Tracer implementation that makes it possible for +// unittests to verify whether certain methods were / were not called. 
+type testTracer struct{} + +var fakeIDSource = 1 + +func nextFakeID() int { + fakeIDSource++ + return fakeIDSource +} + +type testSpanContext struct { + HasParent bool + FakeID int +} + +func (n testSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +type testSpan struct { + spanContext testSpanContext + OperationName string + StartTime time.Time + Tags map[string]interface{} +} + +func (n testSpan) Equal(os Span) bool { + other, ok := os.(testSpan) + if !ok { + return false + } + if n.spanContext != other.spanContext { + return false + } + if n.OperationName != other.OperationName { + return false + } + if !n.StartTime.Equal(other.StartTime) { + return false + } + if len(n.Tags) != len(other.Tags) { + return false + } + + for k, v := range n.Tags { + if ov, ok := other.Tags[k]; !ok || ov != v { + return false + } + } + + return true +} + +// testSpan: +func (n testSpan) Context() SpanContext { return n.spanContext } +func (n testSpan) SetTag(key string, value interface{}) Span { return n } +func (n testSpan) Finish() {} +func (n testSpan) FinishWithOptions(opts FinishOptions) {} +func (n testSpan) LogFields(fields ...log.Field) {} +func (n testSpan) LogKV(kvs ...interface{}) {} +func (n testSpan) SetOperationName(operationName string) Span { return n } +func (n testSpan) Tracer() Tracer { return testTracer{} } +func (n testSpan) SetBaggageItem(key, val string) Span { return n } +func (n testSpan) BaggageItem(key string) string { return "" } +func (n testSpan) LogEvent(event string) {} +func (n testSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n testSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n testTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + sso := StartSpanOptions{} + for _, o := range opts { + o.Apply(&sso) + } + return n.startSpanWithOptions(operationName, sso) +} + +func (n testTracer) startSpanWithOptions(name string, opts StartSpanOptions) Span { + fakeID := nextFakeID() + if len(opts.References) > 0 { + fakeID = opts.References[0].ReferencedContext.(testSpanContext).FakeID + } + + return testSpan{ + OperationName: name, + StartTime: opts.StartTime, + Tags: opts.Tags, + spanContext: testSpanContext{ + HasParent: len(opts.References) > 0, + FakeID: fakeID, + }, + } +} + +// Inject belongs to the Tracer interface. +func (n testTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + spanContext := sp.(testSpanContext) + switch format { + case HTTPHeaders, TextMap: + carrier.(TextMapWriter).Set(testHTTPHeaderPrefix+"fakeid", strconv.Itoa(spanContext.FakeID)) + return nil + } + return ErrUnsupportedFormat +} + +// Extract belongs to the Tracer interface. +func (n testTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + switch format { + case HTTPHeaders, TextMap: + // Just for testing purposes... generally not a worthwhile thing to + // propagate. 
+		sm := testSpanContext{}
+		err := carrier.(TextMapReader).ForeachKey(func(key, val string) error {
+			switch strings.ToLower(key) {
+			case testHTTPHeaderPrefix + "fakeid":
+				i, err := strconv.Atoi(val)
+				if err != nil {
+					return err
+				}
+				sm.FakeID = i
+			}
+			return nil
+		})
+		return sm, err
+	}
+	return nil, ErrSpanContextNotFound
+}
diff --git a/vendor/github.com/philhofer/fwd/reader_test.go b/vendor/github.com/philhofer/fwd/reader_test.go
new file mode 100644
index 0000000000..5d40acf323
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/reader_test.go
@@ -0,0 +1,354 @@
+package fwd
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"testing"
+	"unsafe"
+)
+
+// partialReader reads into only
+// part of the supplied byte slice
+// from the underlying reader
+type partialReader struct {
+	r io.Reader
+}
+
+func (p partialReader) Read(b []byte) (int, error) {
+	n := max(1, rand.Intn(len(b)))
+	return p.r.Read(b[:n])
+}
+
+func randomBts(sz int) []byte {
+	o := make([]byte, sz)
+	for i := 0; i < len(o); i += 8 {
+		j := (*int64)(unsafe.Pointer(&o[i]))
+		*j = rand.Int63()
+	}
+	return o
+}
+
+func TestRead(t *testing.T) {
+	bts := randomBts(512)
+
+	// make the buffer much
+	// smaller than the underlying
+	// bytes to incur multiple fills
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 128)
+
+	if rd.BufferSize() != cap(rd.data) {
+		t.Errorf("BufferSize() returned %d; should return %d", rd.BufferSize(), cap(rd.data))
+	}
+
+	// starting Buffered() should be 0
+	if rd.Buffered() != 0 {
+		t.Errorf("Buffered() should return 0 at initialization; got %d", rd.Buffered())
+	}
+
+	some := make([]byte, 32)
+	n, err := rd.Read(some)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n == 0 {
+		t.Fatal("read 0 bytes w/ a nil error!")
+	}
+	some = some[:n]
+
+	more := make([]byte, 64)
+	j, err := rd.Read(more)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if j == 0 {
+		t.Fatal("read 0 bytes w/ a nil error")
+	}
+	more = more[:j]
+
+	out, err := ioutil.ReadAll(rd)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	all := append(some, more...)
+	all = append(all, out...)
+
+	if !bytes.Equal(bts, all) {
+		t.Errorf("bytes not equal; %d bytes in and %d bytes out", len(bts), len(all))
+	}
+
+	// test multiple fills from the underlying reader
+	big := randomBts(1 << 21)
+	rd = NewReaderSize(partialReader{bytes.NewReader(big)}, 2048)
+	buf := make([]byte, 3100)
+
+	n, err = rd.ReadFull(buf)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 3100 {
+		t.Errorf("expected 3100 bytes read by ReadFull; got %d", n)
+	}
+	if !bytes.Equal(buf[:n], big[:n]) {
+		t.Error("data mismatch")
+	}
+	rest := make([]byte, (1<<21)-3100)
+	n, err = io.ReadFull(rd, rest)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != len(rest) {
+		t.Errorf("expected %d bytes read by io.ReadFull; got %d", len(rest), n)
+	}
+	if !bytes.Equal(append(buf, rest...), big) {
+		t.Fatal("data mismatch")
+	}
+}
+
+func TestReadByte(t *testing.T) {
+	bts := randomBts(512)
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 98)
+
+	var (
+		err error
+		i   int
+		b   byte
+	)
+
+	// scan through the whole
+	// array byte-by-byte
+	for err != io.EOF {
+		b, err = rd.ReadByte()
+		if err == nil {
+			if b != bts[i] {
+				t.Fatalf("offset %d: %d in; %d out", i, b, bts[i])
+			}
+		}
+		i++
+	}
+	if err != io.EOF {
+		t.Fatal(err)
+	}
+}
+
+func TestSkipNoSeek(t *testing.T) {
+	bts := randomBts(1024)
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+	n, err := rd.Skip(512)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 512 {
+		t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
+	}
+
+	var b byte
+	b, err = rd.ReadByte()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if b != bts[512] {
+		t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
+	}
+
+	n, err = rd.Skip(10)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 10 {
+		t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
+	}
+
+	// now try to skip past the end
+	rd = NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+	n, err = rd.Skip(2000)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("expected error %q; got %q", io.ErrUnexpectedEOF, err)
+	}
+	if n != 1024 {
+		t.Fatalf("expected to skip only 1024 bytes; skipped %d", n)
+	}
+}
+
+func TestSkipSeek(t *testing.T) {
+	bts := randomBts(1024)
+
+	// bytes.Reader implements io.Seeker
+	rd := NewReaderSize(bytes.NewReader(bts), 200)
+
+	n, err := rd.Skip(512)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 512 {
+		t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 512)
+	}
+
+	var b byte
+	b, err = rd.ReadByte()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	if b != bts[512] {
+		t.Fatalf("at index %d: %d in; %d out", 512, bts[512], b)
+	}
+
+	n, err = rd.Skip(10)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 10 {
+		t.Fatalf("Skip() returned a nil error, but skipped %d bytes instead of %d", n, 10)
+	}
+
+	// now try to skip past the end
+	rd.Reset(bytes.NewReader(bts))
+
+	// because of how bytes.Reader
+	// implements Seek, this should
+	// return (2000, nil)
+	n, err = rd.Skip(2000)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 2000 {
+		t.Fatalf("should have returned %d bytes; returned %d", 2000, n)
+	}
+
+	// the next call to Read()
+	// should return io.EOF
+	n, err = rd.Read([]byte{0, 0, 0})
+	if err != io.EOF {
+		t.Errorf("expected %q; got %q", io.EOF, err)
+	}
+	if n != 0 {
+		t.Errorf("expected 0 bytes read; got %d", n)
+	}
+
+}
+
+func TestPeek(t *testing.T) {
+	bts := randomBts(1024)
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+	// first, a peek < buffer size
+	var (
+		peek []byte
+		err  error
+	)
+	peek, err = rd.Peek(100)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(peek) != 100 {
+		t.Fatalf("asked for %d bytes; got %d", 100, len(peek))
+	}
+	if !bytes.Equal(peek, bts[:100]) {
+		t.Fatal("peeked bytes not equal")
+	}
+
+	// now, a peek > buffer size
+	peek, err = rd.Peek(256)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(peek) != 256 {
+		t.Fatalf("asked for %d bytes; got %d", 256, len(peek))
+	}
+	if !bytes.Equal(peek, bts[:256]) {
+		t.Fatal("peeked bytes not equal")
+	}
+
+	// now try to peek past EOF
+	peek, err = rd.Peek(2048)
+	if err != io.EOF {
+		t.Fatalf("expected error %q; got %q", io.EOF, err)
+	}
+	if len(peek) != 1024 {
+		t.Fatalf("expected %d bytes peek-able; got %d", 1024, len(peek))
+	}
+}
+
+func TestNext(t *testing.T) {
+	size := 1024
+	bts := randomBts(size)
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+	chunksize := 256
+	chunks := size / chunksize
+
+	for i := 0; i < chunks; i++ {
+		out, err := rd.Next(chunksize)
+		if err != nil {
+			t.Fatal(err)
+		}
+		start := chunksize * i
+		if !bytes.Equal(bts[start:start+chunksize], out) {
+			t.Fatalf("chunk %d: chunks not equal", i+1)
+		}
+	}
+}
+
+func TestWriteTo(t *testing.T) {
+	bts := randomBts(2048)
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 200)
+
+	// cause the buffer
+	// to fill a little, just
+	// to complicate things
+	rd.Peek(25)
+
+	var out bytes.Buffer
+	n, err := rd.WriteTo(&out)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 2048 {
+		t.Fatalf("should have written %d bytes; wrote %d", 2048, n)
+	}
+	if !bytes.Equal(out.Bytes(), bts) {
+		t.Fatal("bytes not equal")
+	}
+}
+
+func TestReadFull(t *testing.T) {
+	bts := randomBts(1024)
+	rd := NewReaderSize(partialReader{bytes.NewReader(bts)}, 256)
+
+	// try to ReadFull() the whole thing
+	out := make([]byte, 1024)
+	n, err := rd.ReadFull(out)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if n != 1024 {
+		t.Fatalf("expected to read %d bytes; read %d", 1024, n)
+	}
+	if !bytes.Equal(bts, out) {
+		t.Fatal("bytes not equal")
+	}
+
+	// we've read everything; this should EOF
+	n, err = rd.Read(out)
+	if err != io.EOF {
+		t.Fatalf("expected %q; got %q", io.EOF, err)
+	}
+
+	rd.Reset(partialReader{bytes.NewReader(bts)})
+
+	// now try to read *past* EOF
+	out = make([]byte, 1500)
+	n, err = rd.ReadFull(out)
+	if err != io.ErrUnexpectedEOF {
+		t.Fatalf("expected error %q; got %q", io.ErrUnexpectedEOF, err)
+	}
+	if n != 1024 {
+		t.Fatalf("expected to read %d bytes; read %d", 1024, n)
+	}
+}
diff --git a/vendor/github.com/philhofer/fwd/writer_test.go b/vendor/github.com/philhofer/fwd/writer_test.go
new file mode 100644
index 0000000000..8bab07c99d
--- /dev/null
+++ b/vendor/github.com/philhofer/fwd/writer_test.go
@@ -0,0 +1,239 @@
+package fwd
+
+import (
+	"bytes"
+	"io"
+	"math/rand"
+	"testing"
+)
+
+type chunkedWriter struct {
+	w *Writer
+}
+
+// writes 'p' in randomly-sized chunks
+func (c chunkedWriter) Write(p []byte) (int, error) {
+	l := len(p)
+	n := 0
+	for n < l {
+		amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
+		nn, err := c.w.Write(p[n : n+amt])
+		n += nn
+		if err == nil && nn < amt {
+			err = io.ErrShortWrite
+		}
+		if err != nil {
+			return n, err
+		}
+	}
+	return n, nil
+}
+
+// analogous to Write(), but with a string
+func (c chunkedWriter) WriteString(s string) (int, error) {
+	l := len(s)
+	n := 0
+	for n < l {
+		amt := max(rand.Intn(l-n), 1) // number of bytes to write; at least 1
+		nn, err := c.w.WriteString(s[n : n+amt])
+		n += nn
+		if err == nil && nn < amt {
+			err = io.ErrShortWrite
+		}
+		if err != nil {
+			return n, err
+		}
+ } + return n, nil +} + +// writes via random calls to Next() +type nextWriter struct { + wr *Writer +} + +func (c nextWriter) Write(p []byte) (int, error) { + l := len(p) + n := 0 + for n < l { + amt := max(rand.Intn(l-n), 1) // at least 1 byte + fwd, err := c.wr.Next(amt) // get next (amt) bytes + if err != nil { + + // this may happen occasionally + if err == io.ErrShortBuffer { + if cap(c.wr.buf) >= amt { + panic("bad io.ErrShortBuffer") + } + continue + } + + return n, err + } + if len(fwd) != amt { + panic("bad Next() len") + } + n += copy(fwd, p[n:]) + } + return n, nil +} + +func TestWrite(t *testing.T) { + nbts := 4096 + bts := randomBts(nbts) + var buf bytes.Buffer + wr := NewWriterSize(&buf, 512) + + if wr.BufferSize() != 512 { + t.Fatalf("expected BufferSize() to be %d; found %d", 512, wr.BufferSize()) + } + + cwr := chunkedWriter{wr} + nb, err := cwr.Write(bts) + if err != nil { + t.Fatal(err) + } + if nb != nbts { + t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + if wr.Buffered() != 0 { + t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered()) + } + + if buf.Len() != nbts { + t.Fatalf("wrote %d bytes, but buffer is %d bytes long", nbts, buf.Len()) + } + if !bytes.Equal(bts, buf.Bytes()) { + t.Fatal("buf.Bytes() is not the same as the input bytes") + } +} + +func TestWriteString(t *testing.T) { + nbts := 3998 + str := string(randomBts(nbts)) + var buf bytes.Buffer + wr := NewWriterSize(&buf, 1137) + + if wr.BufferSize() != 1137 { + t.Fatalf("expected BufferSize() to return %d; returned %d", 1137, wr.BufferSize()) + } + + cwr := chunkedWriter{wr} + nb, err := cwr.WriteString(str) + if err != nil { + t.Fatal(err) + } + if nb != nbts { + t.Fatalf("expected to write %d bytes; wrote %d bytes", nbts, nb) + } + + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + if wr.Buffered() != 0 { + t.Fatalf("expected 0 buffered bytes; found %d", wr.Buffered()) + } + + if buf.Len() != nbts { + t.Fatalf("wrote %d bytes, buf buffer is %d bytes long", nbts, buf.Len()) + } + if buf.String() != str { + t.Fatal("buf.String() is not the same as input string") + } +} + +func TestWriteByte(t *testing.T) { + nbts := 3200 + bts := randomBts(nbts) + var buf bytes.Buffer + wr := NewWriter(&buf) + + if wr.BufferSize() != DefaultWriterSize { + t.Fatalf("expected BufferSize() to return %d; returned %d", DefaultWriterSize, wr.BufferSize()) + } + + // write byte-by-byte + for _, b := range bts { + if err := wr.WriteByte(b); err != nil { + t.Fatal(err) + } + } + + err := wr.Flush() + if err != nil { + t.Fatal(err) + } + + if buf.Len() != nbts { + t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len()) + } + + if !bytes.Equal(buf.Bytes(), bts) { + t.Fatal("buf.Bytes() and input are not equal") + } +} + +func TestWriterNext(t *testing.T) { + nbts := 1871 + bts := randomBts(nbts) + var buf bytes.Buffer + wr := NewWriterSize(&buf, 500) + nwr := nextWriter{wr} + + nb, err := nwr.Write(bts) + if err != nil { + t.Fatal(err) + } + + if nb != nbts { + t.Fatalf("expected to write %d bytes; wrote %d", nbts, nb) + } + + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + if buf.Len() != nbts { + t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len()) + } + + if !bytes.Equal(buf.Bytes(), bts) { + t.Fatal("buf.Bytes() and input are not equal") + } +} + +func TestReadFrom(t *testing.T) { + nbts := 2139 + bts := randomBts(nbts) + var buf bytes.Buffer + wr := NewWriterSize(&buf, 987) + + rd := 
partialReader{bytes.NewReader(bts)} + + nb, err := wr.ReadFrom(rd) + if err != nil { + t.Fatal(err) + } + if nb != int64(nbts) { + t.Fatalf("expeted to write %d bytes; wrote %d", nbts, nb) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + if buf.Len() != nbts { + t.Fatalf("expected buf.Len() to be %d; got %d", nbts, buf.Len()) + } + if !bytes.Equal(buf.Bytes(), bts) { + t.Fatal("buf.Bytes() and input are not equal") + } + +} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 0000000000..588ceca183 --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,11 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.4.3 + - 1.5.4 + - 1.6.2 + - 1.7.1 + - tip + +script: + - go test -v ./... diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000000..835ba3e755 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
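The pkg/errors files vendored below provide the Wrap/Cause primitives used for error annotation. As a quick orientation, here is a minimal sketch of the wrap-then-inspect pattern documented in the README that follows; it assumes only the public API shown there, and the `readConfig` helper and file path are illustrative, not part of the package:

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/pkg/errors"
)

// readConfig wraps any read failure with context and a stack trace.
// (readConfig is a hypothetical caller, not part of pkg/errors.)
func readConfig(path string) ([]byte, error) {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read config")
	}
	return b, nil
}

func main() {
	if _, err := readConfig("/does/not/exist"); err != nil {
		// %v prints the annotated message chain;
		// errors.Cause unwraps back to the original *os.PathError.
		fmt.Printf("%v\n", err)
		fmt.Printf("cause: %v\n", errors.Cause(err))
	}
}
```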
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 0000000000..273db3c98a --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,52 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). + +## Contributing + +We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. + +Before proposing a change, please discuss your change by raising an issue. + +## Licence + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 0000000000..a932eade02 --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... 
+ +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/bench_test.go b/vendor/github.com/pkg/errors/bench_test.go new file mode 100644 index 0000000000..0416a3cbb8 --- /dev/null +++ b/vendor/github.com/pkg/errors/bench_test.go @@ -0,0 +1,59 @@ +// +build go1.7 + +package errors + +import ( + "fmt" + "testing" + + stderrors "errors" +) + +func noErrors(at, depth int) error { + if at >= depth { + return stderrors.New("no error") + } + return noErrors(at+1, depth) +} +func yesErrors(at, depth int) error { + if at >= depth { + return New("ye error") + } + return yesErrors(at+1, depth) +} + +func BenchmarkErrors(b *testing.B) { + var toperr error + type run struct { + stack int + std bool + } + runs := []run{ + {10, false}, + {10, true}, + {100, false}, + {100, true}, + {1000, false}, + {1000, true}, + } + for _, r := range runs { + part := "pkg/errors" + if r.std { + part = "errors" + } + name := fmt.Sprintf("%s-stack-%d", part, r.stack) + b.Run(name, func(b *testing.B) { + var err error + f := yesErrors + if r.std { + f = noErrors + } + b.ReportAllocs() + for i := 0; i < b.N; i++ { + err = f(0, r.stack) + } + b.StopTimer() + toperr = err + }) + } +} diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000000..842ee80456 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,269 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// and the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required the errors.WithStack and errors.WithMessage +// functions destructure errors.Wrap into its component operations of annotating +// an error with a stack trace and an a message, respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error which does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// causer interface is not exported by this package, but is considered a part +// of stable public API. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported +// +// %s print the error. 
If the error has a Cause it will be +// printed recursively +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface. +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// Where errors.StackTrace is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d", f) +// } +// } +// +// stackTracer interface is not exported by this package, but is considered a part +// of stable public API. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is call, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. 
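+// Unlike Wrap, WithMessage does not record a stack trace at the point it is called.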
+// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/errors_test.go b/vendor/github.com/pkg/errors/errors_test.go new file mode 100644 index 0000000000..1d8c635586 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors_test.go @@ -0,0 +1,226 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "reflect" + "testing" +) + +func TestNew(t *testing.T) { + tests := []struct { + err string + want error + }{ + {"", fmt.Errorf("")}, + {"foo", fmt.Errorf("foo")}, + {"foo", New("foo")}, + {"string with format specifiers: %v", errors.New("string with format specifiers: %v")}, + } + + for _, tt := range tests { + got := New(tt.err) + if got.Error() != tt.want.Error() { + t.Errorf("New.Error(): got: %q, want %q", got, tt.want) + } + } +} + +func TestWrapNil(t *testing.T) { + got := Wrap(nil, "no error") + if got != nil { + t.Errorf("Wrap(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWrap(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrap(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := Wrap(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrap(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +type nilError struct{} + +func (nilError) Error() string { return "nil error" } + +func TestCause(t *testing.T) { + x := New("error") + tests := []struct { + err error + want error + }{{ + // nil error is nil + err: nil, + want: nil, + }, { + // explicit nil error is nil + err: (error)(nil), + want: nil, + }, { + // typed nil is nil + err: (*nilError)(nil), + want: (*nilError)(nil), + }, { + // uncaused error is unaffected + err: io.EOF, + want: io.EOF, + }, { + // caused error returns cause + err: Wrap(io.EOF, "ignored"), + want: io.EOF, + }, { + err: x, // return from errors.New + want: x, + }, { + WithMessage(nil, "whoops"), + nil, + }, { + WithMessage(io.EOF, "whoops"), + io.EOF, + }, { + WithStack(nil), + nil, + }, { + WithStack(io.EOF), + io.EOF, + }} + + for i, tt := range tests { + got := Cause(tt.err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("test %d: got %#v, want %#v", i+1, got, tt.want) + } + } +} + +func TestWrapfNil(t *testing.T) { + got := Wrapf(nil, "no error") + if got != nil { + t.Errorf("Wrapf(nil, \"no 
error\"): got %#v, expected nil", got) + } +} + +func TestWrapf(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {Wrapf(io.EOF, "read error without format specifiers"), "client error", "client error: read error without format specifiers: EOF"}, + {Wrapf(io.EOF, "read error with %d format specifier", 1), "client error", "client error: read error with 1 format specifier: EOF"}, + } + + for _, tt := range tests { + got := Wrapf(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("Wrapf(%v, %q): got: %v, want %v", tt.err, tt.message, got, tt.want) + } + } +} + +func TestErrorf(t *testing.T) { + tests := []struct { + err error + want string + }{ + {Errorf("read error without format specifiers"), "read error without format specifiers"}, + {Errorf("read error with %d format specifier", 1), "read error with 1 format specifier"}, + } + + for _, tt := range tests { + got := tt.err.Error() + if got != tt.want { + t.Errorf("Errorf(%v): got: %q, want %q", tt.err, got, tt.want) + } + } +} + +func TestWithStackNil(t *testing.T) { + got := WithStack(nil) + if got != nil { + t.Errorf("WithStack(nil): got %#v, expected nil", got) + } +} + +func TestWithStack(t *testing.T) { + tests := []struct { + err error + want string + }{ + {io.EOF, "EOF"}, + {WithStack(io.EOF), "EOF"}, + } + + for _, tt := range tests { + got := WithStack(tt.err).Error() + if got != tt.want { + t.Errorf("WithStack(%v): got: %v, want %v", tt.err, got, tt.want) + } + } +} + +func TestWithMessageNil(t *testing.T) { + got := WithMessage(nil, "no error") + if got != nil { + t.Errorf("WithMessage(nil, \"no error\"): got %#v, expected nil", got) + } +} + +func TestWithMessage(t *testing.T) { + tests := []struct { + err error + message string + want string + }{ + {io.EOF, "read error", "read error: EOF"}, + {WithMessage(io.EOF, "read error"), "client error", "client error: read error: EOF"}, + } + + for _, tt := range tests { + got := WithMessage(tt.err, tt.message).Error() + if got != tt.want { + t.Errorf("WithMessage(%v, %q): got: %q, want %q", tt.err, tt.message, got, tt.want) + } + } + +} + +// errors.New, etc values are not expected to be compared by value +// but the change in errors#27 made them incomparable. Assert that +// various kinds of errors have a functional equality operator, even +// if the result of that equality is always false. 
+func TestErrorEquality(t *testing.T) { + vals := []error{ + nil, + io.EOF, + errors.New("EOF"), + New("EOF"), + Errorf("EOF"), + Wrap(io.EOF, "EOF"), + Wrapf(io.EOF, "EOF%d", 2), + WithMessage(nil, "whoops"), + WithMessage(io.EOF, "whoops"), + WithStack(io.EOF), + WithStack(nil), + } + + for i := range vals { + for j := range vals { + _ = vals[i] == vals[j] // mustn't panic + } + } +} diff --git a/vendor/github.com/pkg/errors/example_test.go b/vendor/github.com/pkg/errors/example_test.go new file mode 100644 index 0000000000..c1fc13e384 --- /dev/null +++ b/vendor/github.com/pkg/errors/example_test.go @@ -0,0 +1,205 @@ +package errors_test + +import ( + "fmt" + + "github.com/pkg/errors" +) + +func ExampleNew() { + err := errors.New("whoops") + fmt.Println(err) + + // Output: whoops +} + +func ExampleNew_printf() { + err := errors.New("whoops") + fmt.Printf("%+v", err) + + // Example output: + // whoops + // github.com/pkg/errors_test.ExampleNew_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:17 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func ExampleWithMessage() { + cause := errors.New("whoops") + err := errors.WithMessage(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func ExampleWithStack() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Println(err) + + // Output: whoops +} + +func ExampleWithStack_printf() { + cause := errors.New("whoops") + err := errors.WithStack(cause) + fmt.Printf("%+v", err) + + // Example Output: + // whoops + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 + // github.com/pkg/errors_test.ExampleWithStack_printf + // /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56 + // testing.runExample + // /usr/lib/go/src/testing/example.go:114 + // testing.RunExamples + // /usr/lib/go/src/testing/example.go:38 + // testing.(*M).Run + // /usr/lib/go/src/testing/testing.go:744 + // main.main + // github.com/pkg/errors/_test/_testmain.go:106 + // runtime.main + // /usr/lib/go/src/runtime/proc.go:183 + // runtime.goexit + // /usr/lib/go/src/runtime/asm_amd64.s:2086 +} + +func ExampleWrap() { + cause := errors.New("whoops") + err := errors.Wrap(cause, "oh noes") + fmt.Println(err) + + // Output: oh noes: whoops +} + +func fn() error { + e1 := errors.New("error") + e2 := errors.Wrap(e1, "inner") + e3 := errors.Wrap(e2, "middle") + return errors.Wrap(e3, "outer") +} + +func ExampleCause() { + err := fn() + fmt.Println(err) + fmt.Println(errors.Cause(err)) + + // Output: outer: middle: inner: error + // error +} + +func ExampleWrap_extended() { + err := fn() + fmt.Printf("%+v\n", err) + + // Example output: + // error + // github.com/pkg/errors_test.fn + // 
/home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.ExampleCause_printf + // /home/dfc/src/github.com/pkg/errors/example_test.go:63 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:104 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer +} + +func ExampleWrapf() { + cause := errors.New("whoops") + err := errors.Wrapf(cause, "oh noes #%d", 2) + fmt.Println(err) + + // Output: oh noes #2: whoops +} + +func ExampleErrorf_extended() { + err := errors.Errorf("whoops: %s", "foo") + fmt.Printf("%+v", err) + + // Example output: + // whoops: foo + // github.com/pkg/errors_test.ExampleErrorf + // /home/dfc/src/github.com/pkg/errors/example_test.go:101 + // testing.runExample + // /home/dfc/go/src/testing/example.go:114 + // testing.RunExamples + // /home/dfc/go/src/testing/example.go:38 + // testing.(*M).Run + // /home/dfc/go/src/testing/testing.go:744 + // main.main + // /github.com/pkg/errors/_test/_testmain.go:102 + // runtime.main + // /home/dfc/go/src/runtime/proc.go:183 + // runtime.goexit + // /home/dfc/go/src/runtime/asm_amd64.s:2059 +} + +func Example_stackTrace() { + type stackTracer interface { + StackTrace() errors.StackTrace + } + + err, ok := errors.Cause(fn()).(stackTracer) + if !ok { + panic("oops, err does not implement stackTracer") + } + + st := err.StackTrace() + fmt.Printf("%+v", st[0:2]) // top two frames + + // Example output: + // github.com/pkg/errors_test.fn + // /home/dfc/src/github.com/pkg/errors/example_test.go:47 + // github.com/pkg/errors_test.Example_stackTrace + // /home/dfc/src/github.com/pkg/errors/example_test.go:127 +} + +func ExampleCause_printf() { + err := errors.Wrap(func() error { + return func() error { + return errors.Errorf("hello %s", fmt.Sprintf("world")) + }() + }(), "failed") + + fmt.Printf("%v", err) + + // Output: failed: hello world +} diff --git a/vendor/github.com/pkg/errors/format_test.go b/vendor/github.com/pkg/errors/format_test.go new file mode 100644 index 0000000000..15fd7d89d7 --- /dev/null +++ b/vendor/github.com/pkg/errors/format_test.go @@ -0,0 +1,535 @@ +package errors + +import ( + "errors" + "fmt" + "io" + "regexp" + "strings" + "testing" +) + +func TestFormatNew(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + New("error"), + "%s", + "error", + }, { + New("error"), + "%v", + "error", + }, { + New("error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatNew\n" + + "\t.+/github.com/pkg/errors/format_test.go:26", + }, { + New("error"), + "%q", + `"error"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatErrorf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Errorf("%s", "error"), + "%s", + "error", + }, { + Errorf("%s", "error"), + "%v", + "error", + }, { + Errorf("%s", "error"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatErrorf\n" + + 
"\t.+/github.com/pkg/errors/format_test.go:56", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrap(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrap(New("error"), "error2"), + "%s", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%v", + "error2: error", + }, { + Wrap(New("error"), "error2"), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:82", + }, { + Wrap(io.EOF, "error"), + "%s", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%v", + "error: EOF", + }, { + Wrap(io.EOF, "error"), + "%+v", + "EOF\n" + + "error\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:96", + }, { + Wrap(Wrap(io.EOF, "error1"), "error2"), + "%+v", + "EOF\n" + + "error1\n" + + "github.com/pkg/errors.TestFormatWrap\n" + + "\t.+/github.com/pkg/errors/format_test.go:103\n", + }, { + Wrap(New("error with space"), "context"), + "%q", + `"context: error with space"`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWrapf(t *testing.T) { + tests := []struct { + error + format string + want string + }{{ + Wrapf(io.EOF, "error%d", 2), + "%s", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%v", + "error2: EOF", + }, { + Wrapf(io.EOF, "error%d", 2), + "%+v", + "EOF\n" + + "error2\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:134", + }, { + Wrapf(New("error"), "error%d", 2), + "%s", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%v", + "error2: error", + }, { + Wrapf(New("error"), "error%d", 2), + "%+v", + "error\n" + + "github.com/pkg/errors.TestFormatWrapf\n" + + "\t.+/github.com/pkg/errors/format_test.go:149", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.error, tt.format, tt.want) + } +} + +func TestFormatWithStack(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithStack(io.EOF), + "%s", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%v", + []string{"EOF"}, + }, { + WithStack(io.EOF), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:175"}, + }, { + WithStack(New("error")), + "%s", + []string{"error"}, + }, { + WithStack(New("error")), + "%v", + []string{"error"}, + }, { + WithStack(New("error")), + "%+v", + []string{"error", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:189"}, + }, { + WithStack(WithStack(io.EOF)), + "%+v", + []string{"EOF", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:197"}, + }, { + WithStack(WithStack(Wrapf(io.EOF, "message"))), + "%+v", + []string{"EOF", + "message", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:205"}, + }, { + WithStack(Errorf("error%d", 1)), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithStack\n" + + 
"\t.+/github.com/pkg/errors/format_test.go:216", + "github.com/pkg/errors.TestFormatWithStack\n" + + "\t.+/github.com/pkg/errors/format_test.go:216"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatWithMessage(t *testing.T) { + tests := []struct { + error + format string + want []string + }{{ + WithMessage(New("error"), "error2"), + "%s", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%v", + []string{"error2: error"}, + }, { + WithMessage(New("error"), "error2"), + "%+v", + []string{ + "error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:244", + "error2"}, + }, { + WithMessage(io.EOF, "addition1"), + "%s", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%v", + []string{"addition1: EOF"}, + }, { + WithMessage(io.EOF, "addition1"), + "%+v", + []string{"EOF", "addition1"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%v", + []string{"addition2: addition1: EOF"}, + }, { + WithMessage(WithMessage(io.EOF, "addition1"), "addition2"), + "%+v", + []string{"EOF", "addition1", "addition2"}, + }, { + Wrap(WithMessage(io.EOF, "error1"), "error2"), + "%+v", + []string{"EOF", "error1", "error2", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:272"}, + }, { + WithMessage(Errorf("error%d", 1), "error2"), + "%+v", + []string{"error1", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:278", + "error2"}, + }, { + WithMessage(WithStack(io.EOF), "error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:285", + "error"}, + }, { + WithMessage(Wrap(WithStack(io.EOF), "inside-error"), "outside-error"), + "%+v", + []string{ + "EOF", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "inside-error", + "github.com/pkg/errors.TestFormatWithMessage\n" + + "\t.+/github.com/pkg/errors/format_test.go:293", + "outside-error"}, + }} + + for i, tt := range tests { + testFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true) + } +} + +func TestFormatGeneric(t *testing.T) { + starts := []struct { + err error + want []string + }{ + {New("new-error"), []string{ + "new-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:315"}, + }, {Errorf("errorf-error"), []string{ + "errorf-error", + "github.com/pkg/errors.TestFormatGeneric\n" + + "\t.+/github.com/pkg/errors/format_test.go:319"}, + }, {errors.New("errors-new-error"), []string{ + "errors-new-error"}, + }, + } + + wrappers := []wrapper{ + { + func(err error) error { return WithMessage(err, "with-message") }, + []string{"with-message"}, + }, { + func(err error) error { return WithStack(err) }, + []string{ + "github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\n\t" + + ".+/github.com/pkg/errors/format_test.go:333", + }, + }, { + func(err error) error { return Wrap(err, "wrap-error") }, + []string{ + "wrap-error", + "github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\n\t" + + ".+/github.com/pkg/errors/format_test.go:339", + }, + }, { + func(err error) error { return Wrapf(err, "wrapf-error%d", 1) }, + []string{ + "wrapf-error1", + "github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\n\t" + + ".+/github.com/pkg/errors/format_test.go:346", + }, + }, + } + + for s := 
range starts {
+		err := starts[s].err
+		want := starts[s].want
+		testFormatCompleteCompare(t, s, err, "%+v", want, false)
+		testGenericRecursive(t, err, want, wrappers, 3)
+	}
+}
+
+func testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {
+	got := fmt.Sprintf(format, arg)
+	gotLines := strings.SplitN(got, "\n", -1)
+	wantLines := strings.SplitN(want, "\n", -1)
+
+	if len(wantLines) > len(gotLines) {
+		t.Errorf("test %d: wantLines(%d) > gotLines(%d):\n got: %q\nwant: %q", n+1, len(wantLines), len(gotLines), got, want)
+		return
+	}
+
+	for i, w := range wantLines {
+		match, err := regexp.MatchString(w, gotLines[i])
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !match {
+			t.Errorf("test %d: line %d: fmt.Sprintf(%q, err):\n got: %q\nwant: %q", n+1, i+1, format, got, want)
+		}
+	}
+}
+
+var stackLineR = regexp.MustCompile(`\.`)
+
+// parseBlocks parses input into a slice, where:
//   - in case an entry contains a newline, it's a stack trace
//   - in case an entry contains no newline, it's a solo line.
+//
+// Detecting stack boundaries only works if the WithStack calls appear
+// on the same line; that's why it is optional here.
+//
+// Example use:
+//
+// for _, e := range blocks {
+//   if strings.ContainsAny(e, "\n") {
+//     // Match as stack
+//   } else {
+//     // Match as line
+//   }
+// }
+//
+func parseBlocks(input string, detectStackboundaries bool) ([]string, error) {
+	var blocks []string
+
+	stack := ""
+	wasStack := false
+	lines := map[string]bool{} // already found lines
+
+	for _, l := range strings.Split(input, "\n") {
+		isStackLine := stackLineR.MatchString(l)
+
+		switch {
+		case !isStackLine && wasStack:
+			blocks = append(blocks, stack, l)
+			stack = ""
+			lines = map[string]bool{}
+		case isStackLine:
+			if wasStack {
+				// Detected two stacks, one after the other; possibly because lines match in
+				// our tests due to WithStack(WithStack(io.EOF)) on the same line.
+ if detectStackboundaries { + if lines[l] { + if len(stack) == 0 { + return nil, errors.New("len of block must not be zero here") + } + + blocks = append(blocks, stack) + stack = l + lines = map[string]bool{l: true} + continue + } + } + + stack = stack + "\n" + l + } else { + stack = l + } + lines[l] = true + case !isStackLine && !wasStack: + blocks = append(blocks, l) + default: + return nil, errors.New("must not happen") + } + + wasStack = isStackLine + } + + // Use up stack + if stack != "" { + blocks = append(blocks, stack) + } + return blocks, nil +} + +func testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) { + gotStr := fmt.Sprintf(format, arg) + + got, err := parseBlocks(gotStr, detectStackBoundaries) + if err != nil { + t.Fatal(err) + } + + if len(got) != len(want) { + t.Fatalf("test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\n got: %s\nwant: %s\ngotStr: %q", + n+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr) + } + + for i := range got { + if strings.ContainsAny(want[i], "\n") { + // Match as stack + match, err := regexp.MatchString(want[i], got[i]) + if err != nil { + t.Fatal(err) + } + if !match { + t.Fatalf("test %d: block %d: fmt.Sprintf(%q, err):\ngot:\n%q\nwant:\n%q\nall-got:\n%s\nall-want:\n%s\n", + n+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want)) + } + } else { + // Match as message + if got[i] != want[i] { + t.Fatalf("test %d: fmt.Sprintf(%s, err) at block %d got != want:\n got: %q\nwant: %q", n+1, format, i+1, got[i], want[i]) + } + } + } +} + +type wrapper struct { + wrap func(err error) error + want []string +} + +func prettyBlocks(blocks []string, prefix ...string) string { + var out []string + + for _, b := range blocks { + out = append(out, fmt.Sprintf("%v", b)) + } + + return " " + strings.Join(out, "\n ") +} + +func testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) { + if len(beforeWant) == 0 { + panic("beforeWant must not be empty") + } + for _, w := range list { + if len(w.want) == 0 { + panic("want must not be empty") + } + + err := w.wrap(beforeErr) + + // Copy required cause append(beforeWant, ..) modified beforeWant subtly. + beforeCopy := make([]string, len(beforeWant)) + copy(beforeCopy, beforeWant) + + beforeWant := beforeCopy + last := len(beforeWant) - 1 + var want []string + + // Merge two stacks behind each other. + if strings.ContainsAny(beforeWant[last], "\n") && strings.ContainsAny(w.want[0], "\n") { + want = append(beforeWant[:last], append([]string{beforeWant[last] + "((?s).*)" + w.want[0]}, w.want[1:]...)...) + } else { + want = append(beforeWant, w.want...) + } + + testFormatCompleteCompare(t, maxDepth, err, "%+v", want, false) + if maxDepth > 0 { + testGenericRecursive(t, err, want, list, maxDepth-1) + } + } +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000000..6b1f2891a5 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,178 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strings" +) + +// Frame represents a program counter inside a stack frame. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. 
+func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s path of source file relative to the compile time GOPATH +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + pc := f.pc() + fn := runtime.FuncForPC(pc) + if fn == nil { + io.WriteString(s, "unknown") + } else { + file, _ := fn.FileLine(pc) + fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) + } + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + fmt.Fprintf(s, "%d", f.line()) + case 'n': + name := runtime.FuncForPC(f.pc()).Name() + io.WriteString(s, funcname(name)) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + fmt.Fprintf(s, "\n%+v", f) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + fmt.Fprintf(s, "%v", []Frame(st)) + } + case 's': + fmt.Fprintf(s, "%s", []Frame(st)) + } +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func trimGOPATH(name, file string) string { + // Here we want to get the source file path relative to the compile time + // GOPATH. As of Go 1.6.x there is no direct way to know the compiled + // GOPATH at runtime, but we can infer the number of path segments in the + // GOPATH. We note that fn.Name() returns the function name qualified by + // the import path, which does not include the GOPATH. Thus we can trim + // segments from the beginning of the file path until the number of path + // separators remaining is one more than the number of path separators in + // the function name. For example, given: + // + // GOPATH /home/user + // file /home/user/src/pkg/sub/file.go + // fn.Name() pkg/sub.Type.Method + // + // We want to produce: + // + // pkg/sub/file.go + // + // From this we can easily see that fn.Name() has one less path separator + // than our desired output. 
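+	// (For the example above: "pkg/sub.Type.Method" contains one
+	// separator, while "pkg/sub/file.go" contains two.)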
We count separators from the end of the file + // path until it finds two more than in the function name and then move + // one character forward to preserve the initial path segment without a + // leading separator. + const sep = "/" + goal := strings.Count(name, sep) + 2 + i := len(file) + for n := 0; n < goal; n++ { + i = strings.LastIndex(file[:i], sep) + if i == -1 { + // not enough separators found, set i so that the slice expression + // below leaves file unmodified + i = -len(sep) + break + } + } + // get back to 0 or trim the leading separator + file = file[i+len(sep):] + return file +} diff --git a/vendor/github.com/pkg/errors/stack_test.go b/vendor/github.com/pkg/errors/stack_test.go new file mode 100644 index 0000000000..510c27a9f9 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack_test.go @@ -0,0 +1,292 @@ +package errors + +import ( + "fmt" + "runtime" + "testing" +) + +var initpc, _, _, _ = runtime.Caller(0) + +func TestFrameLine(t *testing.T) { + var tests = []struct { + Frame + want int + }{{ + Frame(initpc), + 9, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) + }(), + 20, + }, { + func() Frame { + var pc, _, _, _ = runtime.Caller(1) + return Frame(pc) + }(), + 28, + }, { + Frame(0), // invalid PC + 0, + }} + + for _, tt := range tests { + got := tt.Frame.line() + want := tt.want + if want != got { + t.Errorf("Frame(%v): want: %v, got: %v", uintptr(tt.Frame), want, got) + } + } +} + +type X struct{} + +func (x X) val() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func (x *X) ptr() Frame { + var pc, _, _, _ = runtime.Caller(0) + return Frame(pc) +} + +func TestFrameFormat(t *testing.T) { + var tests = []struct { + Frame + format string + want string + }{{ + Frame(initpc), + "%s", + "stack_test.go", + }, { + Frame(initpc), + "%+s", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go", + }, { + Frame(0), + "%s", + "unknown", + }, { + Frame(0), + "%+s", + "unknown", + }, { + Frame(initpc), + "%d", + "9", + }, { + Frame(0), + "%d", + "0", + }, { + Frame(initpc), + "%n", + "init", + }, { + func() Frame { + var x X + return x.ptr() + }(), + "%n", + `\(\*X\).ptr`, + }, { + func() Frame { + var x X + return x.val() + }(), + "%n", + "X.val", + }, { + Frame(0), + "%n", + "", + }, { + Frame(initpc), + "%v", + "stack_test.go:9", + }, { + Frame(initpc), + "%+v", + "github.com/pkg/errors.init\n" + + "\t.+/github.com/pkg/errors/stack_test.go:9", + }, { + Frame(0), + "%v", + "unknown:0", + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.Frame, tt.format, tt.want) + } +} + +func TestFuncname(t *testing.T) { + tests := []struct { + name, want string + }{ + {"", ""}, + {"runtime.main", "main"}, + {"github.com/pkg/errors.funcname", "funcname"}, + {"funcname", "funcname"}, + {"io.copyBuffer", "copyBuffer"}, + {"main.(*R).Write", "(*R).Write"}, + } + + for _, tt := range tests { + got := funcname(tt.name) + want := tt.want + if got != want { + t.Errorf("funcname(%q): want: %q, got %q", tt.name, want, got) + } + } +} + +func TestTrimGOPATH(t *testing.T) { + var tests = []struct { + Frame + want string + }{{ + Frame(initpc), + "github.com/pkg/errors/stack_test.go", + }} + + for i, tt := range tests { + pc := tt.Frame.pc() + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + got := trimGOPATH(fn.Name(), file) + testFormatRegexp(t, i, got, "%s", tt.want) + } +} + +func TestStackTrace(t *testing.T) { + tests := []struct { + err error + want []string + }{{ + New("ooh"), []string{ + 
"github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:172", + }, + }, { + Wrap(New("ooh"), "ahh"), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:177", // this is the stack of Wrap, not New + }, + }, { + Cause(Wrap(New("ooh"), "ahh")), []string{ + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:182", // this is the stack of New + }, + }, { + func() error { return New("ooh") }(), []string{ + `github.com/pkg/errors.(func·009|TestStackTrace.func1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:187", // this is the stack of New's caller + }, + }, { + Cause(func() error { + return func() error { + return Errorf("hello %s", fmt.Sprintf("world")) + }() + }()), []string{ + `github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:196", // this is the stack of Errorf + `github.com/pkg/errors.(func·011|TestStackTrace.func2)` + + "\n\t.+/github.com/pkg/errors/stack_test.go:197", // this is the stack of Errorf's caller + "github.com/pkg/errors.TestStackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:198", // this is the stack of Errorf's caller's caller + }, + }} + for i, tt := range tests { + x, ok := tt.err.(interface { + StackTrace() StackTrace + }) + if !ok { + t.Errorf("expected %#v to implement StackTrace() StackTrace", tt.err) + continue + } + st := x.StackTrace() + for j, want := range tt.want { + testFormatRegexp(t, i, st[j], "%+v", want) + } + } +} + +func stackTrace() StackTrace { + const depth = 8 + var pcs [depth]uintptr + n := runtime.Callers(1, pcs[:]) + var st stack = pcs[0:n] + return st.StackTrace() +} + +func TestStackTraceFormat(t *testing.T) { + tests := []struct { + StackTrace + format string + want string + }{{ + nil, + "%s", + `\[\]`, + }, { + nil, + "%v", + `\[\]`, + }, { + nil, + "%+v", + "", + }, { + nil, + "%#v", + `\[\]errors.Frame\(nil\)`, + }, { + make(StackTrace, 0), + "%s", + `\[\]`, + }, { + make(StackTrace, 0), + "%v", + `\[\]`, + }, { + make(StackTrace, 0), + "%+v", + "", + }, { + make(StackTrace, 0), + "%#v", + `\[\]errors.Frame{}`, + }, { + stackTrace()[:2], + "%s", + `\[stack_test.go stack_test.go\]`, + }, { + stackTrace()[:2], + "%v", + `\[stack_test.go:225 stack_test.go:272\]`, + }, { + stackTrace()[:2], + "%+v", + "\n" + + "github.com/pkg/errors.stackTrace\n" + + "\t.+/github.com/pkg/errors/stack_test.go:225\n" + + "github.com/pkg/errors.TestStackTraceFormat\n" + + "\t.+/github.com/pkg/errors/stack_test.go:276", + }, { + stackTrace()[:2], + "%#v", + `\[\]errors.Frame{stack_test.go:225, stack_test.go:284}`, + }} + + for i, tt := range tests { + testFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want) + } +} diff --git a/vendor/github.com/raintank/dur/datetime_test.go b/vendor/github.com/raintank/dur/datetime_test.go new file mode 100644 index 0000000000..2f4dc36805 --- /dev/null +++ b/vendor/github.com/raintank/dur/datetime_test.go @@ -0,0 +1,137 @@ +package dur + +import ( + "fmt" + "testing" + "time" +) + +const outFormat = "15:04:05 2006-Jan-02" + +func TestParseDateTime(t *testing.T) { + + // Let's use CET (UTC+1) as our TZ for the tests. It shows that custom TZ support works, + // while being minimally different from UTC in case you want to follow the logic/math. 
+	// IOW things like "midnight" will be in this TZ and the nows that are used are also
+	// interpreted in this TZ (the same "now" can be a different day in different timezones)
+	loc, err := time.LoadLocation("CET")
+	if err != nil {
+		panic(err)
+	}
+
+	// a few values for "now"
+	jul8 := time.Date(2017, time.July, 8, 15, 30, 0, 0, loc)   // saturday 8 Jul 2017 15:30 CET
+	jul9 := time.Date(2017, time.July, 9, 15, 30, 0, 0, loc)   // sunday 9 Jul 2017 15:30 CET
+	jul10 := time.Date(2017, time.July, 10, 15, 30, 0, 0, loc) // monday 10 Jul 2017 15:30 CET
+	jul11 := time.Date(2017, time.July, 11, 15, 30, 0, 0, loc) // tuesday 11 Jul 2017 15:30 CET
+
+	var tests = []struct {
+		in       string
+		now      time.Time
+		expTsStr string
+		expErr   error
+	}{
+		{"now", jul8, "15:30:00 2017-Jul-08", nil},
+		{"-1d", jul8, "15:30:00 2017-Jul-07", nil},
+		{"-7d", jul8, "15:30:00 2017-Jul-01", nil},
+		{"-10d", jul8, "15:30:00 2017-Jun-28", nil},
+		{"-10d5h", jul8, "10:30:00 2017-Jun-28", nil},
+		{"now-1d", jul8, "15:30:00 2017-Jul-07", nil},
+		{"now-7d", jul8, "15:30:00 2017-Jul-01", nil},
+		{"now-10d", jul8, "15:30:00 2017-Jun-28", nil},
+		{"now-10d5h", jul8, "10:30:00 2017-Jun-28", nil},
+
+		// YYYYMMDD
+		{"20091201", jul8, "00:00:00 2009-Dec-01", nil}, // graphite assumes 00:00:00
+		{"20091231", jul8, "00:00:00 2009-Dec-31", nil}, // graphite assumes 00:00:00
+
+		// HH:MM_YYYYMMDD
+		{"04:00_20110501", jul8, "04:00:00 2011-May-01", nil},
+		{"16:00_20110501", jul8, "16:00:00 2011-May-01", nil},
+
+		// HH:MM YYYYMMDD
+		{"4:00 20110501", jul8, "04:00:00 2011-May-01", nil},
+		{"04:00 20110501", jul8, "04:00:00 2011-May-01", nil},
+		{"16:00 20110501", jul8, "16:00:00 2011-May-01", nil},
+
+		// a unix timestamp which cannot be confused with the YYYYMMDD format above.
+		// one of my favorites: Valentine's Day 2009 in central Europe.
+		{"1234567890", jul8, "00:31:30 2009-Feb-14", nil},
+
+		// MM/DD/YY[YY]
+		{"01/02/2000", jul8, "00:00:00 2000-Jan-02", nil},
+		{"05/01/2000", jul8, "00:00:00 2000-May-01", nil},
+
+		// other at(1)-compatible time formats:
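+		// (relative to the given "now": midnight = 00:00, noon = 12:00, teatime = 16:00)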
+ {"midnight", jul8, "00:00:00 2017-Jul-08", nil}, + {"noon", jul8, "12:00:00 2017-Jul-08", nil}, + {"teatime", jul8, "16:00:00 2017-Jul-08", nil}, + + {"yesterday", jul8, "00:00:00 2017-Jul-07", nil}, // graphite assumes 00:00:00 + {"today", jul8, "00:00:00 2017-Jul-08", nil}, // graphite assumes 00:00:00 + {"tomorrow", jul8, "00:00:00 2017-Jul-09", nil}, // graphite assumes 00:00:00 I think, but hard to tell, because graphite doesn't return future data + + {"noon yesterday", jul8, "12:00:00 2017-Jul-07", nil}, + {"3am tomorrow", jul8, "03:00:00 2017-Jul-09", nil}, + {"3AM tomorrow", jul8, "03:00:00 2017-Jul-09", nil}, + {"6pm today", jul8, "18:00:00 2017-Jul-08", nil}, + {"6PM today", jul8, "18:00:00 2017-Jul-08", nil}, + {"6:00PM today", jul8, "18:00:00 2017-Jul-08", nil}, + {"06:00PM today", jul8, "18:00:00 2017-Jul-08", nil}, + {"06PM today", jul8, "18:00:00 2017-Jul-08", nil}, + {"january 1", jul8, "00:00:00 2017-Jan-01", nil}, + {"jan 1", jul8, "00:00:00 2017-Jan-01", nil}, + {"march 8", jul8, "00:00:00 2017-Mar-08", nil}, + {"mar 8", jul8, "00:00:00 2017-Mar-08", nil}, + {"monday", jul8, "00:00:00 2017-Jul-03", nil}, + {"monday", jul9, "00:00:00 2017-Jul-03", nil}, + {"monday", jul10, "00:00:00 2017-Jul-10", nil}, + {"monday", jul11, "00:00:00 2017-Jul-10", nil}, + + {"noon 08/12/98", jul8, "12:00:00 1998-Aug-12", nil}, + {"noon 08/12/2002", jul8, "12:00:00 2002-Aug-12", nil}, + {"midnight 20170812", jul8, "00:00:00 2017-Aug-12", nil}, + {"noon tomorrow", jul8, "12:00:00 2017-Jul-09", nil}, + } + + for i, tt := range tests { + ts, err := ParseDateTime(tt.in, loc, tt.now, 0) + if tt.expErr == nil && err != nil { + t.Errorf("case %d: ParseDateTime(%q, %d, 0) expected err nil, got err %v", i, tt.in, tt.now, err) + } + if tt.expErr != nil && err == nil { + t.Errorf("case %d: ParseDateTime(%q, %d, 0) expected err %v, got err nil", i, tt.in, tt.now, tt.expErr) + } + + expTime, err := time.ParseInLocation(outFormat, tt.expTsStr, loc) + if err != nil { + panic(fmt.Sprintf("case %d: error parsing expTime out of input %q with format str %q: %v", i, tt.expTsStr, outFormat, err)) + } + expTs := uint32(expTime.Unix()) + + if ts != expTs { + t.Errorf("case %d: ParseDateTime(%q, %d, 0) expected %d (%q) got %d (%q)", i, tt.in, tt.now, expTs, tt.expTsStr, ts, time.Unix(int64(ts), 0).In(loc).Format(outFormat)) + } + } +} + +var res uint32 + +func BenchmarkParseDateTime(b *testing.B) { + patts := []string{"now", "-7d", "now-7d10h3min", "today", "monday", "march 10", "04:00 20110501", "4pm 20110501", "1234567890", "teatime 12/25/1998"} + loc, err := time.LoadLocation("CET") + if err != nil { + panic(err) + } + now := time.Date(2017, time.July, 8, 15, 30, 0, 0, loc) // saturday 8 Jul 2017 15:30 CET + for _, patt := range patts { + b.Run(patt, func(b *testing.B) { + for i := 0; i < b.N; i++ { + res, err = ParseDateTime(patt, loc, now, 0) + } + if err != nil { + panic(err) + } + }) + } +} diff --git a/vendor/github.com/raintank/dur/duration_test.go b/vendor/github.com/raintank/dur/duration_test.go new file mode 100644 index 0000000000..44b0bdc9c4 --- /dev/null +++ b/vendor/github.com/raintank/dur/duration_test.go @@ -0,0 +1,40 @@ +package dur + +import "testing" + +func TestDuration(t *testing.T) { + var cases = []struct { + in string + out uint32 + err bool + }{ + {"", 0, true}, + {"0", 0, false}, + {"-1", 0, true}, + {"1", 1, false}, + {"3600", 3600, false}, + {"1000000000", 1000000000, false}, + {"1us", 0, true}, + {"1ms", 0, true}, + {"1000ms", 0, true}, + {"1m", 60, false}, + {"1min", 60, false}, + 
{"1h", 3600, false}, + {"1s", 1, false}, + {"2d", 2 * 60 * 60 * 24, false}, + {"10hours", 60 * 60 * 10, false}, + {"7d13h45min21s", 7*24*60*60 + 13*60*60 + 45*60 + 21, false}, + {"01hours", 60 * 60 * 1, false}, + {"2d2d", 4 * 60 * 60 * 24, false}, + } + + for i, c := range cases { + d, err := ParseDuration(c.in) + if (err != nil) != c.err { + t.Fatalf("case %d %q: expected err %t, got err %s", i, c.in, c.err, err) + } + if d != c.out { + t.Fatalf("case %d %q: expected %d, got %d", i, c.in, c.out, d) + } + } +} diff --git a/vendor/github.com/raintank/gziper/.gitignore b/vendor/github.com/raintank/gziper/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/raintank/gziper/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/raintank/gziper/gzip_test.go b/vendor/github.com/raintank/gziper/gzip_test.go new file mode 100644 index 0000000000..f0f84a0803 --- /dev/null +++ b/vendor/github.com/raintank/gziper/gzip_test.go @@ -0,0 +1,164 @@ +// Copyright 2013 Martini Authors +// Copyright 2015 The Macaron Authors +// Copyright 2017 Grafana Labs +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package gziper + +import ( + "bufio" + "net" + "net/http" + "net/http/httptest" + "strings" + "testing" + + . "github.com/smartystreets/goconvey/convey" + "gopkg.in/macaron.v1" +) + +func Test_Gzip(t *testing.T) { + Convey("Gzip response content", t, func() { + before := false + + m := macaron.New() + m.Use(Gziper()) + m.Use(func(r http.ResponseWriter) { + r.(macaron.ResponseWriter).Before(func(rw macaron.ResponseWriter) { + before = true + }) + }) + m.Get("/", func() string { return "hello wolrd!" }) + + // Not yet gzip. + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + _, ok := resp.HeaderMap[_HEADER_CONTENT_ENCODING] + So(ok, ShouldBeFalse) + + ce := resp.Header().Get(_HEADER_CONTENT_ENCODING) + So(strings.EqualFold(ce, "gzip"), ShouldBeFalse) + + // Gzip now. 
+		resp = httptest.NewRecorder()
+		req.Header.Set(_HEADER_ACCEPT_ENCODING, "gzip")
+		m.ServeHTTP(resp, req)
+		_, ok = resp.HeaderMap[_HEADER_CONTENT_ENCODING]
+		So(ok, ShouldBeTrue)
+
+		ce = resp.Header().Get(_HEADER_CONTENT_ENCODING)
+		So(strings.EqualFold(ce, "gzip"), ShouldBeTrue)
+
+		So(before, ShouldBeTrue)
+	})
+}
+
+func Test_Already_Gzipped(t *testing.T) {
+	Convey("Already Gzipped response content", t, func() {
+		m := macaron.New()
+		m.Use(Gziper())
+		m.Get("/", func(ctx *macaron.Context) {
+			ctx.Header().Add(_HEADER_CONTENT_ENCODING, "gzip")
+			ctx.Resp.Write([]byte("raw data"))
+		})
+
+		resp := httptest.NewRecorder()
+		req, err := http.NewRequest("GET", "/", nil)
+		So(err, ShouldBeNil)
+		req.Header.Set(_HEADER_ACCEPT_ENCODING, "gzip")
+
+		m.ServeHTTP(resp, req)
+
+		_, ok := resp.HeaderMap[_HEADER_CONTENT_ENCODING]
+		So(ok, ShouldBeTrue)
+
+		ce := resp.Header().Get(_HEADER_CONTENT_ENCODING)
+		So(strings.EqualFold(ce, "gzip"), ShouldBeTrue)
+
+		// even though the Content-Encoding header says it is gzip, the
+		// body should contain only the string we wrote
+		So(resp.Body.String(), ShouldEqual, "raw data")
+
+	})
+}
+
+type hijackableResponse struct {
+	Hijacked bool
+	header   http.Header
+}
+
+func newHijackableResponse() *hijackableResponse {
+	return &hijackableResponse{header: make(http.Header)}
+}
+
+func (h *hijackableResponse) Header() http.Header           { return h.header }
+func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil }
+func (h *hijackableResponse) WriteHeader(code int)          {}
+func (h *hijackableResponse) Flush()                        {}
+func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+	h.Hijacked = true
+	return nil, nil, nil
+}
+
+func Test_ResponseWriter_Hijack(t *testing.T) {
+	Convey("Hijack response", t, func() {
+		hijackable := newHijackableResponse()
+
+		m := macaron.New()
+		m.Use(Gziper())
+		m.Use(func(rw http.ResponseWriter) {
+			hj, ok := rw.(http.Hijacker)
+			So(ok, ShouldBeTrue)
+
+			hj.Hijack()
+		})
+
+		r, err := http.NewRequest("GET", "/", nil)
+		So(err, ShouldBeNil)
+
+		r.Header.Set(_HEADER_ACCEPT_ENCODING, "gzip")
+		m.ServeHTTP(hijackable, r)
+
+		So(hijackable.Hijacked, ShouldBeTrue)
+	})
+}
+
+func Test_GzipPanic(t *testing.T) {
+	Convey("Gzip response content", t, func() {
+		before := false
+
+		m := macaron.Classic()
+		m.Use(Gziper())
+		m.Use(func(r http.ResponseWriter) {
+			r.(macaron.ResponseWriter).Before(func(rw macaron.ResponseWriter) {
+				before = true
+			})
+		})
+		m.Get("/", func(ctx *macaron.Context) { panic("test") })
+
+		// Gzip now.
+		resp := httptest.NewRecorder()
+		req, _ := http.NewRequest("GET", "/", nil)
+		req.Header.Set(_HEADER_ACCEPT_ENCODING, "gzip")
+		m.ServeHTTP(resp, req)
+		_, ok := resp.HeaderMap[_HEADER_CONTENT_ENCODING]
+		So(ok, ShouldBeFalse)
+
+		So(resp.Body.String(), ShouldContainSubstring, "PANIC: test")
+		So(before, ShouldBeTrue)
+	})
+}
diff --git a/vendor/github.com/raintank/met/LICENSE b/vendor/github.com/raintank/met/LICENSE
new file mode 100644
index 0000000000..dba13ed2dd
--- /dev/null
+++ b/vendor/github.com/raintank/met/LICENSE
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. 
Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. 
This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. 
+ + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. 
+ + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. 
(Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/raintank/met/NOTICE b/vendor/github.com/raintank/met/NOTICE
new file mode 100644
index 0000000000..facdf7028e
--- /dev/null
+++ b/vendor/github.com/raintank/met/NOTICE
@@ -0,0 +1,15 @@
+Copyright 2016 Dieter Plaetinck, raintank inc
+
+
+met is free software: you can redistribute it and/or modify
+it under the terms of the GNU Affero General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+met is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public License
+along with met. If not, see <http://www.gnu.org/licenses/>.
diff --git a/vendor/github.com/raintank/met/README.md b/vendor/github.com/raintank/met/README.md
new file mode 100644
index 0000000000..4719c27bce
--- /dev/null
+++ b/vendor/github.com/raintank/met/README.md
@@ -0,0 +1,15 @@
+an opinionated wrapper around metric client libraries
+
+imported from Grafana ffcc807ed34f853a8bc9600bcf7801547a5feb4f
+
+supports:
+* statsd (recommended!)
+* dogstatsd
+
+and later maybe more, like go-metrics
+
+
+Why?
+* make it easy to switch between libraries.
+* some libraries just take string arguments for gauge names etc, but it's nicer to have variables per object for robustness, especially when used in multiple places, and it gives a central overview.
+* allows you to set deleteGauges and deleteStats in your statsd servers (a good thing for stateless servers), because we will automatically keep sending gauge values.
diff --git a/vendor/github.com/raintank/met/dogstatsd/count.go b/vendor/github.com/raintank/met/dogstatsd/count.go
new file mode 100644
index 0000000000..beb519e9c3
--- /dev/null
+++ b/vendor/github.com/raintank/met/dogstatsd/count.go
@@ -0,0 +1,18 @@
+package dogstatsd
+
+import "github.com/raintank/met"
+
+type Count struct {
+	key     string
+	backend Backend
+}
+
+func (b Backend) NewCount(key string) met.Count {
+	c := Count{key, b}
+	c.Inc(0)
+	return c
+}
+
+func (c Count) Inc(val int64) {
+	c.backend.client.Count(c.key, val, []string{}, 1)
+}
diff --git a/vendor/github.com/raintank/met/dogstatsd/gauge.go b/vendor/github.com/raintank/met/dogstatsd/gauge.go
new file mode 100644
index 0000000000..e3836187a4
--- /dev/null
+++ b/vendor/github.com/raintank/met/dogstatsd/gauge.go
@@ -0,0 +1,51 @@
+package dogstatsd
+
+import (
+	"sync"
+	"time"
+)
+import "github.com/raintank/met"
+
+type Gauge struct {
+	key string
+	val int64
+	sync.Mutex
+	backend Backend
+}
+
+func (b Backend) NewGauge(key string, val int64) met.Gauge {
+	g := Gauge{
+		key:     key,
+		backend: b,
+	}
+	go func() {
+		for {
+			g.Lock()
+			g.backend.client.Gauge(g.key, float64(g.val), []string{}, 1)
+			g.Unlock()
+			time.Sleep(time.Duration(1) * time.Second)
+		}
+	}()
+	return &g
+}
+
+func (g *Gauge) Value(val int64) {
+	g.Lock()
+	defer g.Unlock()
+	g.val = val
+	g.backend.client.Gauge(g.key, float64(g.val), []string{}, 1)
+}
+
+func (g *Gauge) Inc(val int64) {
+	g.Lock()
+	defer g.Unlock()
+	g.val += val
+	g.backend.client.Gauge(g.key, float64(g.val), []string{}, 1)
+}
+
+func (g *Gauge) Dec(val int64) {
+	g.Lock()
+	defer g.Unlock()
+	g.val -= val
+	g.backend.client.Gauge(g.key, float64(g.val), []string{}, 1)
+}
diff --git a/vendor/github.com/raintank/met/dogstatsd/init.go b/vendor/github.com/raintank/met/dogstatsd/init.go
new file mode 100644
index 0000000000..ccb968719f
--- /dev/null
+++ b/vendor/github.com/raintank/met/dogstatsd/init.go
@@ -0,0 +1,27 @@
+// a metrics class that uses dogstatsd on the backend
+
+// note that on creation, we automatically send a default value so that:
+// * influxdb doesn't complain when queried for series that don't exist yet, which breaks graphs in grafana
+// * the series show up in your monitoring tool of choice, so you can easily set up alerting rules, build dashboards, etc
+//   without having to wait for data. some series would otherwise only be created when things go badly wrong etc.
+// note that for gauges and timers this can create inaccuracies because the true values are hard to predict,
+// but it's worth the trade-off.
+// (for count 0 is harmless and accurate)
+
+package dogstatsd
+
+import "github.com/DataDog/datadog-go/statsd"
+
+type Backend struct {
+	client *statsd.Client
+}
+
+// note: library does not auto-add ending dot to prefix, specify it if you want it
+func New(addr, prefix string, tags []string) (Backend, error) {
+	client, err := statsd.New(addr)
+	if err == nil {
+		client.Namespace = prefix
+		client.Tags = tags
+	}
+	return Backend{client}, err
+}
diff --git a/vendor/github.com/raintank/met/dogstatsd/meter.go b/vendor/github.com/raintank/met/dogstatsd/meter.go
new file mode 100644
index 0000000000..0ed5437502
--- /dev/null
+++ b/vendor/github.com/raintank/met/dogstatsd/meter.go
@@ -0,0 +1,20 @@
+// histograms are commonly used for non-timer cases where we want these summaries;
+// that's what this is for.
+package dogstatsd
+
+import "github.com/raintank/met"
+
+type Meter struct {
+	key     string
+	backend Backend
+}
+
+func (b Backend) NewMeter(key string, val int64) met.Meter {
+	m := Meter{key, b}
+	m.Value(val)
+	return m
+}
+
+func (m Meter) Value(val int64) {
+	m.backend.client.Histogram(m.key, float64(val), []string{}, 1)
+}
diff --git a/vendor/github.com/raintank/met/dogstatsd/timer.go b/vendor/github.com/raintank/met/dogstatsd/timer.go
new file mode 100644
index 0000000000..711cf9ee49
--- /dev/null
+++ b/vendor/github.com/raintank/met/dogstatsd/timer.go
@@ -0,0 +1,23 @@
+package dogstatsd
+
+import "time"
+import "github.com/raintank/met"
+
+// note that due to the preseeding in init, you shouldn't rely on the count and count_ps summaries
+// rather, consider maintaining a separate counter
+// see https://github.com/raintank/grafana/issues/133
+
+type Timer struct {
+	key     string
+	backend Backend
+}
+
+func (b Backend) NewTimer(key string, val time.Duration) met.Timer {
+	t := Timer{key, b}
+	t.Value(val)
+	return t
+}
+
+func (t Timer) Value(val time.Duration) {
+	t.backend.client.TimeInMilliseconds(t.key, val.Seconds()*1000, []string{}, 1)
+}
diff --git a/vendor/github.com/raintank/met/helper/helper.go b/vendor/github.com/raintank/met/helper/helper.go
new file mode 100644
index 0000000000..aef46e8a42
--- /dev/null
+++ b/vendor/github.com/raintank/met/helper/helper.go
@@ -0,0 +1,25 @@
+package helper
+
+import (
+	"fmt"
+
+	"github.com/raintank/met"
+	"github.com/raintank/met/dogstatsd"
+	"github.com/raintank/met/statsd"
+)
+
+func New(enabled bool, addr, t, service, instance string) (met.Backend, error) {
+	if t != "standard" && t != "datadog" {
+		panic(fmt.Sprintf("unrecognized statsd type: '%s'", t))
+	}
+	if !enabled {
+		// we could implement a true "null-backend"
+		// but since statsd supports disabled mode, this is easier
+		return statsd.New(enabled, addr, "")
+	}
+	if t == "standard" {
+		return statsd.New(enabled, addr, fmt.Sprintf("%s.%s.", service, instance))
+	} else {
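+		// dogstatsd: the service name becomes the dot-terminated namespace prefix,
+		// and the instance is passed as a tag rather than encoded in the key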
diff --git a/vendor/github.com/raintank/met/helper/helper.go b/vendor/github.com/raintank/met/helper/helper.go new file mode 100644 index 0000000000..aef46e8a42 --- /dev/null +++ b/vendor/github.com/raintank/met/helper/helper.go @@ -0,0 +1,25 @@ +package helper + +import ( + "fmt" + + "github.com/raintank/met" + "github.com/raintank/met/dogstatsd" + "github.com/raintank/met/statsd" +) + +func New(enabled bool, addr, t, service, instance string) (met.Backend, error) { + if t != "standard" && t != "datadog" { + panic(fmt.Sprintf("unrecognized statsd type: '%s'", t)) + } + if !enabled { + // we could implement a true "null-backend" + // but since statsd supports disabled mode, this is easier + return statsd.New(enabled, addr, "") + } + if t == "standard" { + return statsd.New(enabled, addr, fmt.Sprintf("%s.%s.", service, instance)) + } else { + return dogstatsd.New(addr, service+".", []string{"instance:" + instance}) + } +} diff --git a/vendor/github.com/raintank/met/interfaces.go b/vendor/github.com/raintank/met/interfaces.go new file mode 100644 index 0000000000..cd8d14d2e8 --- /dev/null +++ b/vendor/github.com/raintank/met/interfaces.go @@ -0,0 +1,35 @@ +package met + +import "time" + +type Backend interface { + NewCount(key string) Count + NewGauge(key string, val int64) Gauge + NewMeter(key string, val int64) Meter + NewTimer(key string, val time.Duration) Timer +} + +// Count is a type that counts how many hits it's seen in each given interval +// and computes the rate per second +// it's not a long-running counter. +// values are explicit +type Count interface { + Inc(val int64) +} + +// gauge makes sure its value is explicit (i.e. for statsd, keep sending) +type Gauge interface { + Dec(val int64) + Inc(val int64) + Value(val int64) +} + +// like a timer, but not just for timings +type Meter interface { + Value(val int64) +} + +// computes statistical summaries +type Timer interface { + Value(val time.Duration) +}
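note: these interfaces are what application code programs against; helper.New above returns whichever concrete backend was configured. A rough usage sketch; the address, service/instance names and metric keys are hypothetical:

```go
package main

import (
	"time"

	"github.com/raintank/met/helper"
)

func main() {
	// "standard" selects the plain statsd backend, "datadog" selects dogstatsd
	backend, err := helper.New(true, "localhost:8125", "standard", "myservice", "instance1")
	if err != nil {
		panic(err)
	}

	count := backend.NewCount("jobs.completed")
	gauge := backend.NewGauge("queue.depth", 0)
	timer := backend.NewTimer("job.duration", 0)

	count.Inc(1)
	gauge.Value(42)
	timer.Value(150 * time.Millisecond)
}
```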
diff --git a/vendor/github.com/raintank/met/statsd/count.go b/vendor/github.com/raintank/met/statsd/count.go new file mode 100644 index 0000000000..f04a9dabc9 --- /dev/null +++ b/vendor/github.com/raintank/met/statsd/count.go @@ -0,0 +1,18 @@ +package statsd + +import "github.com/raintank/met" + +type Count struct { + key string + backend Backend +} + +func (b Backend) NewCount(key string) met.Count { + c := Count{key, b} + c.Inc(0) + return c +} + +func (c Count) Inc(val int64) { + c.backend.client.Count(c.key, int(val), 1) +} diff --git a/vendor/github.com/raintank/met/statsd/gauge.go b/vendor/github.com/raintank/met/statsd/gauge.go new file mode 100644 index 0000000000..34ae8e9e33 --- /dev/null +++ b/vendor/github.com/raintank/met/statsd/gauge.go @@ -0,0 +1,52 @@ +package statsd + +import ( + "sync" + "time" + + "github.com/raintank/met" +) + +type Gauge struct { + key string + val int64 + sync.Mutex + backend Backend +} + +func (b Backend) NewGauge(key string, val int64) met.Gauge { + g := Gauge{ + key: key, + backend: b, + } + go func() { + for { + g.Lock() + g.backend.client.Gauge(g.key, int(g.val)) + g.Unlock() + time.Sleep(time.Duration(1) * time.Second) + } + }() + return &g +} + +func (g *Gauge) Value(val int64) { + g.Lock() + g.val = val + g.Unlock() + g.backend.client.Gauge(g.key, int(val)) +} + +func (g *Gauge) Inc(val int64) { + g.Lock() + defer g.Unlock() + g.val += val + g.backend.client.Gauge(g.key, int(g.val)) +} + +func (g *Gauge) Dec(val int64) { + g.Lock() + defer g.Unlock() + g.val -= val + g.backend.client.Gauge(g.key, int(g.val)) +} diff --git a/vendor/github.com/raintank/met/statsd/init.go b/vendor/github.com/raintank/met/statsd/init.go new file mode 100644 index 0000000000..0ef863e378 --- /dev/null +++ b/vendor/github.com/raintank/met/statsd/init.go @@ -0,0 +1,14 @@ +package statsd + +import "gopkg.in/alexcesaro/statsd.v1" + +type Backend struct { + client *statsd.Client +} + +// note: library does not auto-add ending dot to prefix. +func New(enabled bool, addr, prefix string) (Backend, error) { + client, err := statsd.New(addr, statsd.WithPrefix(prefix), statsd.Mute(!enabled)) + b := Backend{client} + return b, err +} diff --git a/vendor/github.com/raintank/met/statsd/meter.go b/vendor/github.com/raintank/met/statsd/meter.go new file mode 100644 index 0000000000..34a01460d6 --- /dev/null +++ b/vendor/github.com/raintank/met/statsd/meter.go @@ -0,0 +1,20 @@ +// it's commonly used for non-timer cases where we want these summaries; that's +// what this is for. +package statsd + +import "github.com/raintank/met" + +type Meter struct { + key string + backend Backend +} + +func (b Backend) NewMeter(key string, val int64) met.Meter { + m := Meter{key, b} + m.Value(val) + return m +} + +func (m Meter) Value(val int64) { + m.backend.client.Timing(m.key, int(val), 1) +} diff --git a/vendor/github.com/raintank/met/statsd/timer.go b/vendor/github.com/raintank/met/statsd/timer.go new file mode 100644 index 0000000000..f4a72fb437 --- /dev/null +++ b/vendor/github.com/raintank/met/statsd/timer.go @@ -0,0 +1,23 @@ +package statsd + +import "time" +import "github.com/raintank/met" + +// note that due to the preseeding in init, you shouldn't rely on the count and count_ps summaries +// rather, consider maintaining a separate counter +// see https://github.com/raintank/grafana/issues/133 + +type Timer struct { + key string + backend Backend +} + +func (b Backend) NewTimer(key string, val time.Duration) met.Timer { + t := Timer{key, b} + t.Value(val) + return t +} + +func (t Timer) Value(val time.Duration) { + t.backend.client.Timing(t.key, int(val/time.Millisecond), 1) +}
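note: the statsd timer carries the same caveat as the dogstatsd one: the preseeded sample means the timer's own count and count_ps summaries include a bogus data point, so the comment recommends a separate counter. One hypothetical way to package that advice (TimedCount and the key suffixes are inventions for illustration, not part of the library):

```go
package metrics

import (
	"time"

	"github.com/raintank/met"
)

// TimedCount pairs a Timer with an explicit Count, so that rate queries
// can be built on the counter instead of the timer's skewed summaries.
type TimedCount struct {
	Timer met.Timer
	Count met.Count
}

func NewTimedCount(b met.Backend, key string) TimedCount {
	return TimedCount{
		Timer: b.NewTimer(key+".duration", 0),
		Count: b.NewCount(key + ".hits"),
	}
}

// Observe records one unit of work: its duration on the timer,
// and one hit on the counter.
func (tc TimedCount) Observe(d time.Duration) {
	tc.Timer.Value(d)
	tc.Count.Inc(1)
}
```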
diff --git a/vendor/github.com/raintank/worldping-api/.bra.toml b/vendor/github.com/raintank/worldping-api/.bra.toml new file mode 100644 index 0000000000..1b8656e608 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/.bra.toml @@ -0,0 +1,16 @@ +[run] +init_cmds = [ + ["go", "build", "-o", "./bin/worldping-api"], + ["./bin/worldping-api"] +] +watch_all = true +watch_dirs = [ + "$WORKDIR/pkg", + "$WORKDIR/conf", +] +watch_exts = [".go", ".ini", ".toml", ".html"] +build_delay = 1500 +cmds = [ + ["godep", "go", "build", "-o", "./bin/worldping-api"], + ["./bin/worldping-api"] +] diff --git a/vendor/github.com/raintank/worldping-api/.editorconfig b/vendor/github.com/raintank/worldping-api/.editorconfig new file mode 100644 index 0000000000..5760be5836 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/.editorconfig @@ -0,0 +1,12 @@ +# http://editorconfig.org +root = true + +[*] +indent_style = space +indent_size = 2 +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.md] +trim_trailing_whitespace = false diff --git a/vendor/github.com/raintank/worldping-api/.gitignore b/vendor/github.com/raintank/worldping-api/.gitignore new file mode 100644 index 0000000000..5203f119e4 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/.gitignore @@ -0,0 +1,28 @@ +node_modules +coverage/ +.aws-config.json +awsconfig +/dist +/emails/dist +/public_gen +/tmp + +# Editor junk +*.sublime-workspace +*.swp +.idea/ +*.iml + +/data/* +/bin/* + +conf/custom* +fig.yml +profile.cov + +rt-pkg/build +.notouch + +worldping-api +.notouch + diff --git a/vendor/github.com/raintank/worldping-api/.gitmodules b/vendor/github.com/raintank/worldping-api/.gitmodules new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/raintank/worldping-api/CHANGELOG.md b/vendor/github.com/raintank/worldping-api/CHANGELOG.md new file mode 100644 index 0000000000..fbac06aa30 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/CHANGELOG.md @@ -0,0 +1,3 @@ +# 0.0.1 (2016-03-31) + +Beta Release diff --git a/vendor/github.com/raintank/worldping-api/README.md b/vendor/github.com/raintank/worldping-api/README.md new file mode 100644 index 0000000000..982944e262 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/README.md @@ -0,0 +1,10 @@ +[worldPing](https://worldping.raintank.io) [![Circle CI](https://circleci.com/gh/raintank/worldping-api.svg?style=shield)](https://circleci.com/gh/raintank/worldping-api) +================ +[Website](https://worldping.raintank.io) | +[Twitter](https://twitter.com/raintankSaaS) | +[Slack](https://raintank.slack.com) | +[Email](mailto:hello@raintank.io) + + +Worldping-api is the backend service for the worldPing-app available from [grafana.net](https://grafana.net/plugins/raintank-worldping-app) + diff --git a/vendor/github.com/raintank/worldping-api/apiary.apib b/vendor/github.com/raintank/worldping-api/apiary.apib new file mode 100644 index 0000000000..fa19f80bb5 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/apiary.apib @@ -0,0 +1,778 @@ +FORMAT: 1A +HOST: http://worldping-api.raintank.io/ + +# worldPing API + +With the worldPing API, you can do everything that's available within the user interface. + +### What is worldPing? +worldPing is an app for [Grafana](//www.grafana.org) that continually tests, stores and alerts on the global performance and uptime of your Internet applications. + +[Install worldPing at Grafana.net](https://grafana.net/plugins/raintank-worldping-app) + +### Authentication + +To authenticate your API requests, construct a normal HTTPS request and include an Authorization header with the value of Bearer API_KEY. + +For example: +``` +curl -H "Authorization: Bearer API_KEY" http://worldping-api.raintank.io/api/{method} +``` + + +#### Getting your API Key + +API Keys are generated in Grafana.net in the API Keys tab of the [User Profile](https://grafana.net/profile/api-keys). API Keys may also be revoked in the same location. + +## MonitorTypes [/api/monitor_types] +A monitor is a type of check, which is used in creating and configuring endpoints. Currently supported checks are DNS, Ping, HTTP and HTTPS. + +This method returns a list of supported checks that can be performed and the configuration parameters for each of those checks.
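note: the same authenticated request can be made from Go's standard library; a minimal sketch equivalent to the curl example above (API_KEY is a placeholder, as in the docs):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "http://worldping-api.raintank.io/api/monitor_types", nil)
	if err != nil {
		panic(err)
	}
	// same header the curl example above sends
	req.Header.Set("Authorization", "Bearer API_KEY")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON array of monitor types, as shown below
}
```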
+ +### List MonitorTypes [GET] + ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Response 200 (application/json) + + [ + { + "id":4, + "name":"DNS", + "settings":[ + { + "variable":"name", + "description":"Record Name", + "required":true, + "data_type":"String", + "conditions":{ + + }, + "default_value":"" + }, + { + "variable":"type", + "description":"Record Type", + "required":true, + "data_type":"Enum", + "conditions":{ + "values":[ + "A", + "AAAA", + "CNAME", + "MX", + "NS", + "PTR", + "SOA", + "SRV", + "TXT" + ] + }, + "default_value":"A" + }, + { + "variable":"server", + "description":"Server", + "required":false, + "data_type":"String", + "conditions":{ + + }, + "default_value":"8.8.8.8" + }, + { + "variable":"port", + "description":"Port", + "required":false, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"53" + }, + { + "variable":"protocol", + "description":"Protocol", + "required":false, + "data_type":"Enum", + "conditions":{ + "values":[ + "tcp", + "udp" + ] + }, + "default_value":"udp" + }, + { + "variable":"timeout", + "description":"Timeout", + "required":true, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"5" + } + ] + }, + { + "id":1, + "name":"HTTP", + "settings":[ + { + "variable":"host", + "description":"Hostname", + "required":true, + "data_type":"String", + "conditions":{ + + }, + "default_value":"" + }, + { + "variable":"path", + "description":"Path", + "required":true, + "data_type":"String", + "conditions":{ + + }, + "default_value":"/" + }, + { + "variable":"port", + "description":"Port", + "required":false, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"80" + }, + { + "variable":"method", + "description":"Method", + "required":false, + "data_type":"Enum", + "conditions":{ + "values":[ + "GET", + "POST", + "PUT", + "DELETE", + "HEAD" + ] + }, + "default_value":"GET" + }, + { + "variable":"headers", + "description":"Headers", + "required":false, + "data_type":"Text", + "conditions":{ + + }, + "default_value":"Accept-Encoding: gzip\nUser-Agent: raintank collector\n" + }, + { + "variable":"expectRegex", + "description":"Content Match", + "required":false, + "data_type":"String", + "conditions":{ + + }, + "default_value":"" + }, + { + "variable":"timeout", + "description":"Timeout", + "required":true, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"5" + } + ] + }, + { + "id":2, + "name":"HTTPS", + "settings":[ + { + "variable":"host", + "description":"Hostname", + "required":true, + "data_type":"String", + "conditions":{ + + }, + "default_value":"" + }, + { + "variable":"path", + "description":"Path", + "required":true, + "data_type":"String", + "conditions":{ + + }, + "default_value":"/" + }, + { + "variable":"validateCert", + "description":"Validate SSL Certificate", + "required":false, + "data_type":"Boolean", + "conditions":{ + + }, + "default_value":"true" + }, + { + "variable":"port", + "description":"Port", + "required":false, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"443" + }, + { + "variable":"method", + "description":"Method", + "required":false, + "data_type":"Enum", + "conditions":{ + "values":[ + "GET", + "POST", + "PUT", + "DELETE", + "HEAD" + ] + }, + "default_value":"GET" + }, + { + "variable":"headers", + "description":"Headers", + "required":false, + "data_type":"Text", + "conditions":{ + + }, + "default_value":"Accept-Encoding: gzip\nUser-Agent: raintank collector\n" + }, + { + "variable":"expectRegex", + "description":"Content 
Match", + "required":false, + "data_type":"String", + "conditions":{ + + }, + "default_value":"" + }, + { + "variable":"timeout", + "description":"Timeout", + "required":true, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"5" + } + ] + }, + { + "id":3, + "name":"Ping", + "settings":[ + { + "variable":"hostname", + "description":"Hostname", + "required":true, + "data_type":"String", + "conditions":{ + + }, + "default_value":"" + }, + { + "variable":"timeout", + "description":"Timeout", + "required":true, + "data_type":"Number", + "conditions":{ + + }, + "default_value":"5" + } + ] + } + ] + +## Monitors [/api/monitors] + +A monitor is a check performed on an Endpoint. These methods accepts an Endpoint ID and returns the list of or updates the configured checks on that individual endpoint. + +When you want to update the monitors on a particular endpoint, this method is used, not the *Update Endpoint* method. + +### List Monitors [GET /api/monitors{?endpoint_id}] + ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Response 200 (application/json) + + [ + { + "id":19678, + "org_id":1, + "endpoint_id":1, + "endpoint_slug":"www_raintank_io", + "monitor_type_id":1, + "monitor_type_name":"HTTP", + "collector_ids":[ + 1, + 2, + 3, + 4 + ], + "collector_tags":[ + + ], + "collectors":[ + 1, + 2, + 3, + 4 + ], + "state":0, + "state_change":"2016-04-20T14:09:58Z", + "state_check":"2016-04-20T14:14:28Z", + "settings":[ + { + "variable":"host", + "value":"www.raintank.io" + }, + { + "variable":"port", + "value":"80" + }, + { + "variable":"path", + "value":"/" + }, + { + "variable":"timeout", + "value":"5" + }, + { + "variable":"method", + "value":"GET" + }, + { + "variable":"expectRegex", + "value":"" + }, + { + "variable":"headers", + "value":"Accept-Encoding: gzip\nUser-Agent: raintank collector\n" + } + ], + "health_settings":{ + "num_collectors":3, + "steps":3, + "notifications":{ + "enabled":false, + "addresses":"" + } + }, + "frequency":60, + "enabled":true, + "offset":58, + "updated":"2016-04-20T14:09:48Z" + }, + { + "id":19684, + "org_id":1, + "endpoint_id":1, + "endpoint_slug":"www_raintank_io", + "monitor_type_id":3, + "monitor_type_name":"Ping", + "collector_ids":[ + 1, + 2, + 3, + 4 + ], + "collector_tags":[ + + ], + "collectors":[ + 1, + 2, + 3, + 4 + ], + "state":0, + "state_change":"2016-04-20T14:09:58Z", + "state_check":"2016-04-20T14:15:18Z", + "settings":[ + { + "variable":"hostname", + "value":"www.raintank.io" + }, + { + "variable":"timeout", + "value":"5" + } + ], + "health_settings":{ + "num_collectors":3, + "steps":3, + "notifications":{ + "enabled":false, + "addresses":"" + } + }, + "frequency":10, + "enabled":true, + "offset":8, + "updated":"2016-04-20T14:09:48Z" + } + ] + +### Update Monitor [POST /api/monitors] ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Request (application/json) + + { + "id":19681, + "endpoint_id":1, + "monitor_type_id":2, + "collector_ids":[ + 1, + 2, + 3, + 4 + ], + "collector_tags":[ + + ], + "settings":[ + { + "variable":"host", + "value":"www.raintank.io" + }, + { + "variable":"port", + "value":"443" + }, + { + "variable":"path", + "value":"/about/" + }, + { + "variable":"timeout", + "value":"5" + }, + { + "variable":"method", + "value":"GET" + }, + { + "variable":"expectRegex", + "value":"" + }, + { + "variable":"validateCert", + "value":"true" + }, + { + "variable":"headers", + "value":"Accept-Encoding: gzip\nUser-Agent: raintank collector\n" + } + ], + "enabled":true, + "frequency":60, + 
"health_settings":{ + "num_collectors":3, + "steps":3, + "notifications":{ + "enabled":false, + "addresses":"" + } + } + } + ++ Response 200 (application/json) + + { + "message":"Monitor updated" + } + +## Endpoints [/api/endpoints] + +An endpoint is anything you want to monitor, and is the primary way of interacting with worldPing. An endpoint can be a fully formed URL or IP address, and when monitored by private probes, does not even need to be accessible to the internet. + +These methods allow you to list, create, update and delete your endpoints. + +**Updating:** The Update Endpoint method only allows you to change the endpoint name, slug and tags. To update the monitors and configuration for an endpoint, use the *Update Monitor* method listed above. + +**Creating:** The exception to this rule is the *Create Endpoint* method, which accepts the monitor options at the time of creation. + +### List All Endpoints [GET /api/endpoints{?tag}] ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Response 200 (application/json) + + [ + { + "id":1, + "org_id":1, + "name":"www.raintank.io", + "slug":"www_raintank_io", + "tags":[ + "raintank" + ] + }, + { + "id":2, + "org_id":1, + "name":"www.google.com", + "slug":"www_google_com", + "tags":[ + "google", + "external" + ] + } + ] + +### Update Endpoint [POST /api/endpoints] + ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Request (application/json) + + { + "id":1, + "org_id":1, + "name":"www.raintank.io", + "tags":[ + "raintank", + "demo" + ] + } + ++ Response 200 (application/json) + + { + "message":"Endpoint updated" + } + +### Create Endpoint [PUT /api/endpoints] ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Request (application/json) + + { + "name":"www.raintank.io", + "tags":[ + "raintank" + ], + "monitors":[ + { + "monitor_type_id":1, + "settings":[ + { + "variable":"host", + "value":"www.raintank.io" + }, + { + "variable":"port", + "value":"80" + }, + { + "variable":"path", + "value":"/" + }, + { + "variable":"timeout", + "value":"5" + }, + { + "variable":"method", + "value":"GET" + }, + { + "variable":"expectRegex", + "value":"" + } + ], + "endpoint_id":-1, + "collector_ids":[ + 1, + 2, + 3, + 4 + ], + "collector_tags":[ + + ], + "enabled":true, + "frequency":60, + "health_settings":{ + "steps":3, + "num_collectors":3, + "notifications":{ + "enabled":false, + "addresses":"" + } + } + }, + { + "monitor_type_id":3, + "settings":[ + { + "variable":"hostname", + "value":"www.raintank.io" + }, + { + "variable":"timeout", + "value":"5" + } + ], + "endpoint_id":-1, + "collector_ids":[ + 1, + 2, + 3, + 4 + ], + "collector_tags":[ + + ], + "enabled":true, + "frequency":10, + "health_settings":{ + "steps":3, + "num_collectors":3, + "notifications":{ + "enabled":false, + "addresses":"" + } + } + } + ] + } + ++ Response 200 (application/json) + + { + "id":1, + "org_id":1, + "name":"www.raintank.io", + "slug":"www_raintank_io", + "tags":[ + "raintank" + ] + } + + +## Probes [/api/collectors] + +Probes provide the execution of periodic network performance tests including HTTP checks, DNS and Ping. The results of each test are then transfered back to the worldPing API where they are processed and inserted into a timeseries database. + +This method allows the listing of existing probes, both official worldPing probes as well as any private probes that have been created. 
+ +### List all Probes [GET] + ++ Request + + + Headers + + Authorization: Bearer API_KEY + ++ Response 200 (application/json) + + [ + { + "id":1, + "org_id":1, + "slug":"san-francisco", + "name":"San Francisco", + "tags":[ + "USA" + ], + "public":true, + "latitude":0, + "longitude":0, + "online":true, + "online_change":"2016-04-20T13:09:42Z", + "enabled":true, + "enabled_change":"2015-07-10T21:39:02Z" + }, + { + "id":2, + "org_id":1, + "slug":"amsterdam", + "name":"Amsterdam", + "tags":[ + + ], + "public":true, + "latitude":0, + "longitude":0, + "online":true, + "online_change":"2016-04-20T07:03:42Z", + "enabled":true, + "enabled_change":"2015-07-10T21:39:02Z" + }, + { + "id":3, + "org_id":1, + "slug":"london", + "name":"London", + "tags":[ + "EMEA" + ], + "public":true, + "latitude":0, + "longitude":0, + "online":true, + "online_change":"2016-04-20T12:02:26Z", + "enabled":true, + "enabled_change":"2015-07-10T21:39:02Z" + } + ] \ No newline at end of file diff --git a/vendor/github.com/raintank/worldping-api/circle.yml b/vendor/github.com/raintank/worldping-api/circle.yml new file mode 100644 index 0000000000..a26934804a --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/circle.yml @@ -0,0 +1,33 @@ +machine: + node: + version: 4.0 + environment: + GOPATH: "/home/ubuntu/.go_workspace" + ORG_PATH: "github.com/raintank" + REPO_PATH: "${ORG_PATH}/worldping-api" + +dependencies: + pre: + - if [[ ! -e /usr/local/go-1.6 ]]; then sudo mv /usr/local/go /usr/local/go-1.5.1; wget https://storage.googleapis.com/golang/go1.6.1.linux-amd64.tar.gz && sudo tar -C /usr/local -zxf go1.6.1.linux-amd64.tar.gz && sudo mv /usr/local/go /usr/local/go-1.6 && sudo ln -s /usr/local/go-1.6 /usr/local/go; fi + override: + - rt-pkg/depends.sh + - rt-pkg/build.sh +general: + artifacts: + - rt-pkg/artifacts + +test: + override: + # FMT + - test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" + # GO VET + - go vet ./pkg/... + # Go test + - go test -v ./pkg/... 
+ post: + - cd rt-pkg && /bin/bash packaging.sh +deployment: + production: + branch: master + commands: + - rt-pkg/deploy.sh diff --git a/vendor/github.com/raintank/worldping-api/main.go b/vendor/github.com/raintank/worldping-api/main.go new file mode 100644 index 0000000000..89e0a72a11 --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/main.go @@ -0,0 +1,150 @@ +package main + +import ( + "flag" + "io/ioutil" + "os" + "os/signal" + "path/filepath" + "runtime" + "strconv" + "syscall" + "time" + + "github.com/Dieterbe/profiletrigger/heap" + "github.com/raintank/met/helper" + "github.com/raintank/worldping-api/pkg/alerting" + "github.com/raintank/worldping-api/pkg/api" + "github.com/raintank/worldping-api/pkg/cmd" + "github.com/raintank/worldping-api/pkg/events" + "github.com/raintank/worldping-api/pkg/log" + "github.com/raintank/worldping-api/pkg/services/collectoreventpublisher" + "github.com/raintank/worldping-api/pkg/services/metricpublisher" + "github.com/raintank/worldping-api/pkg/services/notifications" + "github.com/raintank/worldping-api/pkg/services/sqlstore" + "github.com/raintank/worldping-api/pkg/setting" +) + +var version = "master" +var commit = "NA" +var buildstamp string + +var configFile = flag.String("config", "", "path to config file") +var homePath = flag.String("homepath", "", "path to grafana install/home path, defaults to working directory") +var pidFile = flag.String("pidfile", "", "path to pid file") +var exitChan = make(chan int) + +func init() { + runtime.GOMAXPROCS(runtime.NumCPU()) +} + +func main() { + buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64) + + setting.BuildVersion = version + setting.BuildCommit = commit + setting.BuildStamp = buildstampInt64 + notifyShutdown := make(chan struct{}) + go listenToSystemSignels(notifyShutdown) + + flag.Parse() + writePIDFile() + initRuntime() + + if setting.ProfileHeapMB > 0 { + errors := make(chan error) + go func() { + for e := range errors { + log.Error(0, e.Error()) + } + }() + heap, _ := heap.New(setting.ProfileHeapDir, setting.ProfileHeapMB*1000000, setting.ProfileHeapWait, time.Duration(1)*time.Second, errors) + go heap.Run() + } + + metricsBackend, err := helper.New(setting.StatsdEnabled, setting.StatsdAddr, setting.StatsdType, "worldping-api", setting.InstanceId) + if err != nil { + log.Error(3, "Statsd client:", err) + } + + // only local events supported. + events.Init() + + metricpublisher.Init(metricsBackend) + collectoreventpublisher.Init(metricsBackend) + + api.InitCollectorController(metricsBackend) + if setting.AlertingEnabled { + alerting.Init(metricsBackend) + alerting.Construct() + } + + if err := notifications.Init(); err != nil { + log.Fatal(3, "Notification service failed to initialize", err) + } + + cmd.StartServer(notifyShutdown) + exitChan <- 0 +} + +func initRuntime() { + err := setting.NewConfigContext(&setting.CommandLineArgs{ + Config: *configFile, + HomePath: *homePath, + Args: flag.Args(), + }) + + if err != nil { + log.Fatal(3, err.Error()) + } + + log.Info("Starting worldping-api") + log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0)) + setting.LogConfigurationInfo() + + sqlstore.NewEngine() +} + +func writePIDFile() { + if *pidFile == "" { + return + } + + // Ensure the required directory structure exists. + err := os.MkdirAll(filepath.Dir(*pidFile), 0700) + if err != nil { + log.Fatal(3, "Failed to verify pid directory", err) + } + + // Retrieve the PID and write it. 
+ pid := strconv.Itoa(os.Getpid()) + if err := ioutil.WriteFile(*pidFile, []byte(pid), 0644); err != nil { + log.Fatal(3, "Failed to write pidfile", err) + } +} + +func listenToSystemSignels(notifyShutdown chan struct{}) { + signalChan := make(chan os.Signal, 1) + code := 0 + + signal.Notify(signalChan, os.Interrupt) + signal.Notify(signalChan, os.Kill) + signal.Notify(signalChan, syscall.SIGTERM) + + select { + case sig := <-signalChan: + log.Info("Received signal %s. shutting down", sig) + case code = <-exitChan: + switch code { + case 0: + log.Info("Shutting down") + default: + log.Warn("Shutting down") + } + } + close(notifyShutdown) + + api.ShutdownController() + log.Close() + os.Exit(code) +} diff --git a/vendor/github.com/raintank/worldping-api/test.sh b/vendor/github.com/raintank/worldping-api/test.sh new file mode 100755 index 0000000000..881497029f --- /dev/null +++ b/vendor/github.com/raintank/worldping-api/test.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# The script does automatic checking on a Go package and its sub-packages, including: +# 1. gofmt (http://golang.org/cmd/gofmt/) +# 2. goimports (https://github.com/bradfitz/goimports) +# 3. golint (https://github.com/golang/lint) +# 4. go vet (http://golang.org/cmd/vet) +# 5. race detector (http://blog.golang.org/race-detector) +# 6. test coverage (http://blog.golang.org/cover) + +set -e + +# Automatic checks +# test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" +# go vet ./pkg/... +# godep go test -v -race ./pkg/... + +echo "mode: count" > profile.cov + +# Standard go tooling behavior is to ignore dirs with leading underscors +for dir in $(find ./pkg/ -maxdepth 4 -type d); +do +if ls $dir/*.go &> /dev/null; then + godep go test -covermode=count -coverprofile=$dir/profile.tmp $dir + if [ -f $dir/profile.tmp ] + then + cat $dir/profile.tmp | tail -n +2 >> profile.cov + rm $dir/profile.tmp + fi +fi +done + +go tool cover -func profile.cov + +# To submit the test coverage result to coveralls.io, +# use goveralls (https://github.com/mattn/goveralls) +# goveralls -coverprofile=profile.cov -service=travis-ci diff --git a/vendor/github.com/rakyll/globalconf/.travis.yml b/vendor/github.com/rakyll/globalconf/.travis.yml new file mode 100644 index 0000000000..fb9efcb9a2 --- /dev/null +++ b/vendor/github.com/rakyll/globalconf/.travis.yml @@ -0,0 +1,2 @@ +language: go +go: 1.2 diff --git a/vendor/github.com/rakyll/globalconf/globalconf_test.go b/vendor/github.com/rakyll/globalconf/globalconf_test.go new file mode 100644 index 0000000000..f36f74cea4 --- /dev/null +++ b/vendor/github.com/rakyll/globalconf/globalconf_test.go @@ -0,0 +1,267 @@ +package globalconf + +import ( + "flag" + "io/ioutil" + "os" + "testing" +) + +const envTestPrefix = "CONFTEST_" + +func TestNewWithOptionsNoFilename(t *testing.T) { + opts := Options{EnvPrefix: envTestPrefix} + + os.Setenv(envTestPrefix+"D", "EnvD") + + flagD := flag.String("d", "default", "") + flagE := flag.Bool("e", true, "") + + conf, err := NewWithOptions(&opts) + if err != nil { + t.Fatal(err) + } + conf.ParseAll() + + if *flagD != "EnvD" { + t.Errorf("flagD found %v, expected 'EnvD'", *flagD) + } + if !*flagE { + t.Errorf("flagE found %v, expected true", *flagE) + } +} + +func TestParse_Global(t *testing.T) { + resetForTesting("") + + os.Setenv(envTestPrefix+"D", "EnvD") + os.Setenv(envTestPrefix+"E", "true") + os.Setenv(envTestPrefix+"F", "5.5") + + flagA := flag.Bool("a", false, "") + flagB := flag.Float64("b", 0.0, "") + flagC := flag.String("c", "", "") + + flagD := 
flag.String("d", "", "") + flagE := flag.Bool("e", false, "") + flagF := flag.Float64("f", 0.0, "") + + parse(t, "./testdata/global.ini", envTestPrefix) + if !*flagA { + t.Errorf("flagA found %v, expected true", *flagA) + } + if *flagB != 5.6 { + t.Errorf("flagB found %v, expected 5.6", *flagB) + } + if *flagC != "Hello world" { + t.Errorf("flagC found %v, expected 'Hello world'", *flagC) + } + if *flagD != "EnvD" { + t.Errorf("flagD found %v, expected 'EnvD'", *flagD) + } + if !*flagE { + t.Errorf("flagE found %v, expected true", *flagE) + } + if *flagF != 5.5 { + t.Errorf("flagF found %v, expected 5.5", *flagF) + } +} + +func TestParse_DashConversion(t *testing.T) { + resetForTesting("") + + flagFooBar := flag.String("foo-bar", "", "") + os.Setenv("PREFIX_FOO_BAR", "baz") + + opts := Options{EnvPrefix: "PREFIX_"} + conf, err := NewWithOptions(&opts) + if err != nil { + t.Fatal(err) + } + conf.ParseAll() + + if *flagFooBar != "baz" { + t.Errorf("flagFooBar found %v, expected 5.5", *flagFooBar) + } +} + +func TestParse_GlobalWithDottedFlagname(t *testing.T) { + resetForTesting("") + os.Setenv(envTestPrefix+"SOME_VALUE", "some-value") + flagSomeValue := flag.String("some.value", "", "") + + parse(t, "./testdata/global.ini", envTestPrefix) + if *flagSomeValue != "some-value" { + t.Errorf("flagSomeValue found %v, some-value expected", *flagSomeValue) + } +} + +func TestParse_GlobalOverwrite(t *testing.T) { + resetForTesting("-b=7.6") + flagB := flag.Float64("b", 0.0, "") + + parse(t, "./testdata/global.ini", "") + if *flagB != 7.6 { + t.Errorf("flagB found %v, expected 7.6", *flagB) + } +} + +func TestParse_Custom(t *testing.T) { + resetForTesting("") + + os.Setenv(envTestPrefix+"CUSTOM_E", "Hello Env") + + flagB := flag.Float64("b", 5.0, "") + + name := "custom" + custom := flag.NewFlagSet(name, flag.ExitOnError) + flagD := custom.String("d", "dd", "") + flagE := custom.String("e", "ee", "") + + Register(name, custom) + parse(t, "./testdata/custom.ini", envTestPrefix) + if *flagB != 5.0 { + t.Errorf("flagB found %v, expected 5.0", *flagB) + } + if *flagD != "Hello d" { + t.Errorf("flagD found %v, expected 'Hello d'", *flagD) + } + if *flagE != "Hello Env" { + t.Errorf("flagE found %v, expected 'Hello Env'", *flagE) + } +} + +func TestParse_CustomOverwrite(t *testing.T) { + resetForTesting("-b=6") + flagB := flag.Float64("b", 5.0, "") + + name := "custom" + custom := flag.NewFlagSet(name, flag.ExitOnError) + flagD := custom.String("d", "dd", "") + + Register(name, custom) + parse(t, "./testdata/custom.ini", "") + if *flagB != 6.0 { + t.Errorf("flagB found %v, expected 6.0", *flagB) + } + if *flagD != "Hello d" { + t.Errorf("flagD found %v, expected 'Hello d'", *flagD) + } +} + +func TestParse_GlobalAndCustom(t *testing.T) { + resetForTesting("") + flagA := flag.Bool("a", false, "") + flagB := flag.Float64("b", 0.0, "") + flagC := flag.String("c", "", "") + + name := "custom" + custom := flag.NewFlagSet(name, flag.ExitOnError) + flagD := custom.String("d", "", "") + + Register(name, custom) + parse(t, "./testdata/globalandcustom.ini", "") + if !*flagA { + t.Errorf("flagA found %v, expected true", *flagA) + } + if *flagB != 5.6 { + t.Errorf("flagB found %v, expected 5.6", *flagB) + } + if *flagC != "Hello world" { + t.Errorf("flagC found %v, expected 'Hello world'", *flagC) + } + if *flagD != "Hello d" { + t.Errorf("flagD found %v, expected 'Hello d'", *flagD) + } +} + +func TestParse_GlobalAndCustomOverwrite(t *testing.T) { + resetForTesting("-a=true", "-b=5", "-c=Hello") + flagA := 
flag.Bool("a", false, "") + flagB := flag.Float64("b", 0.0, "") + flagC := flag.String("c", "", "") + + name := "custom" + custom := flag.NewFlagSet(name, flag.ExitOnError) + flagD := custom.String("d", "", "") + + Register(name, custom) + parse(t, "./testdata/globalandcustom.ini", "") + if !*flagA { + t.Errorf("flagA found %v, expected true", *flagA) + } + if *flagB != 5.0 { + t.Errorf("flagB found %v, expected 5.0", *flagB) + } + if *flagC != "Hello" { + t.Errorf("flagC found %v, expected 'Hello'", *flagC) + } + if *flagD != "Hello d" { + t.Errorf("flagD found %v, expected 'Hello d'", *flagD) + } +} + +func TestSet(t *testing.T) { + resetForTesting() + file, _ := ioutil.TempFile("", "") + conf := parse(t, file.Name(), "") + conf.Set("", &flag.Flag{Name: "a", Value: newFlagValue("test")}) + + flagA := flag.String("a", "", "") + parse(t, file.Name(), "") + if *flagA != "test" { + t.Errorf("flagA found %v, expected 'test'", *flagA) + } +} + +func TestDelete(t *testing.T) { + resetForTesting() + file, _ := ioutil.TempFile("", "") + conf := parse(t, file.Name(), "") + conf.Set("", &flag.Flag{Name: "a", Value: newFlagValue("test")}) + conf.Delete("", "a") + + flagA := flag.String("a", "", "") + parse(t, file.Name(), "") + if *flagA != "" { + t.Errorf("flagNewA found %v, expected ''", *flagA) + } +} + +func parse(t *testing.T, filename, envPrefix string) *GlobalConf { + opts := Options{ + Filename: filename, + EnvPrefix: envPrefix, + } + conf, err := NewWithOptions(&opts) + if err != nil { + t.Error(err) + } + conf.ParseAll() + return conf +} + +// Resets os.Args and the default flag set. +func resetForTesting(args ...string) { + os.Clearenv() + + os.Args = append([]string{"cmd"}, args...) + flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ExitOnError) +} + +type flagValue struct { + str string +} + +func (f *flagValue) String() string { + return f.str +} + +func (f *flagValue) Set(value string) error { + f.str = value + return nil +} + +func newFlagValue(val string) *flagValue { + return &flagValue{str: val} +} diff --git a/vendor/github.com/rakyll/goini/.gitignore b/vendor/github.com/rakyll/goini/.gitignore new file mode 100644 index 0000000000..6facd5a723 --- /dev/null +++ b/vendor/github.com/rakyll/goini/.gitignore @@ -0,0 +1,8 @@ +.*.swp + +*.[689] +[689].out + +_obj +_test +_testmain.go diff --git a/vendor/github.com/rakyll/goini/ini_test.go b/vendor/github.com/rakyll/goini/ini_test.go new file mode 100644 index 0000000000..8a15eea4c2 --- /dev/null +++ b/vendor/github.com/rakyll/goini/ini_test.go @@ -0,0 +1,169 @@ +package ini + +import ( + "io/ioutil" + "testing" +) + +const ( + exampleStr = `key1 = true + +[section1] +key1 = value2 +key2 = 5 +key3 = 1.3 + +[section2] +key1 = 5 + +` +) + +var ( + dict Dict + err error +) + +func init() { + dict, err = Load("example.ini") +} + +func TestLoad(t *testing.T) { + if err != nil { + t.Error("Example: load error:", err) + } +} + +func TestWrite(t *testing.T) { + d, err := Load("empty.ini") + if err != nil { + t.Error("Example: load error:", err) + } + d.SetString("", "key", "value") + tempFile, err := ioutil.TempFile("", "") + if err != nil { + t.Error("Write: Couldn't create temp file.", err) + } + err = Write(tempFile.Name(), &d) + if err != nil { + t.Error("Write: Couldn't write to temp config file.", err) + } + contents, err := ioutil.ReadFile(tempFile.Name()) + if err != nil { + t.Error("Write: Couldn't read from the temp config file.", err) + } + if string(contents) != "key = value\n\n" { + t.Error("Write: Contents of the config file 
doesn't match the expected.") + } +} + +func TestGetBool(t *testing.T) { + b, found := dict.GetBool("pizza", "ham") + if !found || !b { + t.Error("Example: parse error for key ham of section pizza.") + } + b, found = dict.GetBool("pizza", "mushrooms") + if !found || !b { + t.Error("Example: parse error for key mushrooms of section pizza.") + } + b, found = dict.GetBool("pizza", "capres") + if !found || b { + t.Error("Example: parse error for key capres of section pizza.") + } + b, found = dict.GetBool("pizza", "cheese") + if !found || b { + t.Error("Example: parse error for key cheese of section pizza.") + } +} + +func TestGetStringIntAndDouble(t *testing.T) { + str, found := dict.GetString("wine", "grape") + if !found || str != "Cabernet Sauvignon" { + t.Error("Example: parse error for key grape of section wine.") + } + i, found := dict.GetInt("wine", "year") + if !found || i != 1989 { + t.Error("Example: parse error for key year of section wine.") + } + str, found = dict.GetString("wine", "country") + if !found || str != "Spain" { + t.Error("Example: parse error for key grape of section wine.") + } + d, found := dict.GetDouble("wine", "alcohol") + if !found || d != 12.5 { + t.Error("Example: parse error for key grape of section wine.") + } +} + +func TestSetBoolAndStringAndIntAndDouble(t *testing.T) { + dict.SetBool("pizza", "ham", false) + b, found := dict.GetBool("pizza", "ham") + if !found || b { + t.Error("Example: bool set error for key ham of section pizza.") + } + dict.SetString("pizza", "ham", "no") + n, found := dict.GetString("pizza", "ham") + if !found || n != "no" { + t.Error("Example: string set error for key ham of section pizza.") + } + dict.SetInt("wine", "year", 1978) + i, found := dict.GetInt("wine", "year") + if !found || i != 1978 { + t.Error("Example: int set error for key year of section wine.") + } + dict.SetDouble("wine", "not-exists", 5.6) + d, found := dict.GetDouble("wine", "not-exists") + if !found || d != 5.6 { + t.Error("Example: float set error for not existing key for wine.") + } +} + +func TestDelete(t *testing.T) { + d, err := Load("empty.ini") + if err != nil { + t.Error("Example: load error:", err) + } + d.SetString("pizza", "ham", "yes") + d.Delete("pizza", "ham") + _, found := d.GetString("pizza", "ham") + if found { + t.Error("Example: delete error for key ham of section pizza.") + } + if len(d.GetSections()) > 1 { + t.Error("Only a single section should exist after deletion.") + } +} + +func TestGetNotExist(t *testing.T) { + _, found := dict.GetString("not", "exist") + if found { + t.Error("There is no key exist of section not.") + } +} + +func TestGetSections(t *testing.T) { + sections := dict.GetSections() + if len(sections) != 3 { + t.Error("The number of sections is wrong:", len(sections)) + } + for _, section := range sections { + if section != "" && section != "pizza" && section != "wine" { + t.Errorf("Section '%s' should not be exist.", section) + } + } +} + +func TestString(t *testing.T) { + d, err := Load("empty.ini") + if err != nil { + t.Error("Example: load error:", err) + } + d.SetBool("", "key1", true) + d.SetString("section1", "key1", "value2") + d.SetInt("section1", "key2", 5) + d.SetDouble("section1", "key3", 1.3) + d.SetDouble("section2", "key1", 5.0) + if d.String() != exampleStr { + t.Errorf("Dict cannot be stringified as expected.") + } +} diff --git a/vendor/github.com/rs/cors/.travis.yml b/vendor/github.com/rs/cors/.travis.yml new file mode 100644 index 0000000000..bbb5185a2e --- /dev/null +++ 
b/vendor/github.com/rs/cors/.travis.yml @@ -0,0 +1,4 @@ +language: go +go: +- 1.3 +- 1.4 diff --git a/vendor/github.com/rs/cors/bench_test.go b/vendor/github.com/rs/cors/bench_test.go new file mode 100644 index 0000000000..b6e3721dec --- /dev/null +++ b/vendor/github.com/rs/cors/bench_test.go @@ -0,0 +1,88 @@ +package cors + +import ( + "net/http" + "testing" +) + +type FakeResponse struct { + header http.Header +} + +func (r FakeResponse) Header() http.Header { + return r.header +} + +func (r FakeResponse) WriteHeader(n int) { +} + +func (r FakeResponse) Write(b []byte) (n int, err error) { + return len(b), nil +} + +func BenchmarkWithout(b *testing.B) { + res := FakeResponse{http.Header{}} + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + testHandler.ServeHTTP(res, req) + } +} + +func BenchmarkDefault(b *testing.B) { + res := FakeResponse{http.Header{}} + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "somedomain.com") + handler := Default().Handler(testHandler) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + handler.ServeHTTP(res, req) + } +} + +func BenchmarkAllowedOrigin(b *testing.B) { + res := FakeResponse{http.Header{}} + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "somedomain.com") + c := New(Options{ + AllowedOrigins: []string{"somedomain.com"}, + }) + handler := c.Handler(testHandler) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + handler.ServeHTTP(res, req) + } +} + +func BenchmarkPreflight(b *testing.B) { + res := FakeResponse{http.Header{}} + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Access-Control-Request-Method", "GET") + handler := Default().Handler(testHandler) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + handler.ServeHTTP(res, req) + } +} + +func BenchmarkPreflightHeader(b *testing.B) { + res := FakeResponse{http.Header{}} + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Access-Control-Request-Method", "GET") + req.Header.Add("Access-Control-Request-Headers", "Accept") + handler := Default().Handler(testHandler) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + handler.ServeHTTP(res, req) + } +} diff --git a/vendor/github.com/rs/cors/cors_test.go b/vendor/github.com/rs/cors/cors_test.go new file mode 100644 index 0000000000..1c6899f52e --- /dev/null +++ b/vendor/github.com/rs/cors/cors_test.go @@ -0,0 +1,720 @@ +package cors + +import ( + "net/http" + "net/http/httptest" + "regexp" + "strings" + "testing" +) + +var testHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("bar")) +}) + +func assertHeaders(t *testing.T, resHeaders http.Header, reqHeaders map[string]string) { + for name, value := range reqHeaders { + if actual := strings.Join(resHeaders[name], ", "); actual != value { + t.Errorf("Invalid header `%s', wanted `%s', got `%s'", name, value, actual) + } + } +} + +func assertResponse(t *testing.T, res *httptest.ResponseRecorder, responseCode int) { + if responseCode != res.Code { + t.Errorf("assertResponse: expected response code to be %d but got %d. ", responseCode, res.Code) + } +} + +func TestNoConfig(t *testing.T) { + s := New(Options{ + // Intentionally left blank. 
+ }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestMatchAllOrigin(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"*"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestAllowedOrigin(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestWildcardOrigin(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://*.bar.com"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foo.bar.com") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "http://foo.bar.com", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestDisallowedOrigin(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://barbaz.com") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestDisallowedWildcardOrigin(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://*.bar.com"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foo.baz.com") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + 
"Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestAllowedOriginFunc(t *testing.T) { + r, _ := regexp.Compile("^http://foo") + s := New(Options{ + AllowOriginFunc: func(o string) bool { + return r.MatchString(o) + }, + }) + + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + res := httptest.NewRecorder() + req.Header.Set("Origin", "http://foobar.com") + s.Handler(testHandler).ServeHTTP(res, req) + assertHeaders(t, res.Header(), map[string]string{ + "Access-Control-Allow-Origin": "http://foobar.com", + }) + + res = httptest.NewRecorder() + req.Header.Set("Origin", "http://barfoo.com") + s.Handler(testHandler).ServeHTTP(res, req) + assertHeaders(t, res.Header(), map[string]string{ + "Access-Control-Allow-Origin": "", + }) +} + +func TestMaxAge(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://example.com/"}, + AllowedMethods: []string{"GET"}, + MaxAge: 10, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://example.com/") + req.Header.Add("Access-Control-Request-Method", "GET") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "http://example.com/", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "10", + "Access-Control-Expose-Headers": "", + }) +} + +func TestAllowedMethod(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + AllowedMethods: []string{"PUT", "DELETE"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "PUT") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "PUT", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestDisallowedMethod(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + AllowedMethods: []string{"PUT", "DELETE"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "PATCH") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestAllowedHeader(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + AllowedHeaders: []string{"X-Header-1", "x-header-2"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "GET") + 
req.Header.Add("Access-Control-Request-Headers", "X-Header-2, X-HEADER-1") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "X-Header-2, X-Header-1", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestAllowedWildcardHeader(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + AllowedHeaders: []string{"*"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "GET") + req.Header.Add("Access-Control-Request-Headers", "X-Header-2, X-HEADER-1") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "X-Header-2, X-Header-1", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestDisallowedHeader(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + AllowedHeaders: []string{"X-Header-1", "x-header-2"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "GET") + req.Header.Add("Access-Control-Request-Headers", "X-Header-3, X-Header-1") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestOriginHeader(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "GET") + req.Header.Add("Access-Control-Request-Headers", "origin") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "Origin", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestExposedHeader(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + ExposedHeaders: []string{"X-Header-1", "x-header-2"}, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), 
map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "X-Header-1, X-Header-2", + }) +} + +func TestAllowedCredentials(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foobar.com"}, + AllowCredentials: true, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://foobar.com") + req.Header.Add("Access-Control-Request-Method", "GET") + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "http://foobar.com", + "Access-Control-Allow-Methods": "GET", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestDebug(t *testing.T) { + s := New(Options{ + Debug: true, + }) + + if s.logf == nil { + t.Error("Logger not created when debug=true") + } + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestOptionsPassthrough(t *testing.T) { + s := New(Options{ + OptionsPassthrough: true, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) + +} + +func TestDisableOptionsPassthrough(t *testing.T) { + s := New(Options{ + OptionsPassthrough: true, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + + s.Handler(testHandler).ServeHTTP(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) + + s.Handler(testHandler).ServeHTTP(res, req) + + assertResponse(t, res, 200) + +} + +func TestDefault(t *testing.T) { + s := Default() + if s.Log != nil { + t.Error("c.log should be nil when Default") + } + if !s.allowedOriginsAll { + t.Error("c.allowedOriginsAll should be true when Default") + } + if s.allowedHeaders == nil { + t.Error("c.allowedHeaders should be nil when Default") + } + if s.allowedMethods == nil { + t.Error("c.allowedMethods should be nil when Default") + } +} + +func TestHandlerFunc(t *testing.T) { + s := New(Options{ + // Intentionally left blank. 
+ }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + s.HandlerFunc(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandlerFuncPreflight(t *testing.T) { + s := New(Options{ + // Intentionally left blank. + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + + s.HandlerFunc(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) + +} + +func TestNegroniHandler(t *testing.T) { + s := New(Options{ + // Intentionally left blank. + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + s.ServeHTTP(res, req, testHandler) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestNegroniHandlerPreflight(t *testing.T) { + s := New(Options{ + OptionsPassthrough: true, + }) + + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + + s.ServeHTTP(res, req, testHandler) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandlePreflightInvlaidOriginAbortion(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foo.com"}, + }) + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://example.com/") + + s.handlePreflight(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin, Access-Control-Request-Method, Access-Control-Request-Headers", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandlePreflightNoOptionsAbortion(t *testing.T) { + s := New(Options{ + // Intentionally left blank. 
+ }) + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + + s.handlePreflight(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandleActualRequestAbortsOptionsMethod(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foo.com"}, + }) + res := httptest.NewRecorder() + req, _ := http.NewRequest("OPTIONS", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://example.com/") + + s.handleActualRequest(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandleActualRequestInvalidOriginAbortion(t *testing.T) { + s := New(Options{ + AllowedOrigins: []string{"http://foo.com"}, + }) + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://example.com/") + + s.handleActualRequest(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandleActualRequestAllowsCredentials(t *testing.T) { + s := New(Options{ + AllowCredentials: true, + }) + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://example.com/") + + s.handleActualRequest(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "http://example.com/", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "true", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestHandleActualRequestInvalidMethodAbortion(t *testing.T) { + s := New(Options{ + AllowedMethods: []string{"POST"}, + AllowCredentials: true, + }) + res := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "http://example.com/foo", nil) + req.Header.Add("Origin", "http://example.com/") + + s.handleActualRequest(res, req) + + assertHeaders(t, res.Header(), map[string]string{ + "Vary": "Origin", + "Access-Control-Allow-Origin": "", + "Access-Control-Allow-Methods": "", + "Access-Control-Allow-Headers": "", + "Access-Control-Allow-Credentials": "", + "Access-Control-Max-Age": "", + "Access-Control-Expose-Headers": "", + }) +} + +func TestIsMethodAllowedReturnsFalseWithNoMethods(t *testing.T) { + s := New(Options{ + // Intentionally left blank. + }) + s.allowedMethods = []string{} + if s.isMethodAllowed("") { + t.Error("IsMethodAllowed should return false when c.allowedMethods is empty.") + } +} + +func TestIsMethodAllowedReturnsTrueWithOptions(t *testing.T) { + s := New(Options{ + // Intentionally left blank.
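+ // NOTE (editor's addition, not upstream code): OPTIONS is treated as always allowed so preflight requests can be answered even when AllowedMethods is left at its default.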
+ }) + if !s.isMethodAllowed("OPTIONS") { + t.Error("IsMethodAllowed should return true when c.allowedMethods is nil.") + } +} diff --git a/vendor/github.com/rs/cors/utils_test.go b/vendor/github.com/rs/cors/utils_test.go new file mode 100644 index 0000000000..83053b3fc9 --- /dev/null +++ b/vendor/github.com/rs/cors/utils_test.go @@ -0,0 +1,70 @@ +package cors + +import ( + "strings" + "testing" +) + +func TestWildcard(t *testing.T) { + w := wildcard{"foo", "bar"} + if !w.match("foobar") { + t.Error("foo*bar should match foobar") + } + if !w.match("foobazbar") { + t.Error("foo*bar should match foobazbar") + } + if w.match("foobaz") { + t.Error("foo*bar should not match foobaz") + } + + w = wildcard{"foo", "oof"} + if w.match("foof") { + t.Error("foo*oof should not match foof") + } +} + +func TestConvert(t *testing.T) { + s := convert([]string{"A", "b", "C"}, strings.ToLower) + e := []string{"a", "b", "c"} + if s[0] != e[0] || s[1] != e[1] || s[2] != e[2] { + t.Errorf("%v != %v", s, e) + } +} + +func TestParseHeaderList(t *testing.T) { + h := parseHeaderList("header, second-header, THIRD-HEADER, Numb3r3d-H34d3r") + e := []string{"Header", "Second-Header", "Third-Header", "Numb3r3d-H34d3r"} + if h[0] != e[0] || h[1] != e[1] || h[2] != e[2] { + t.Errorf("%v != %v", h, e) + } +} + +func TestParseHeaderListEmpty(t *testing.T) { + if len(parseHeaderList("")) != 0 { + t.Error("should be empty slice") + } + if len(parseHeaderList(" , ")) != 0 { + t.Error("should be empty slice") + } +} + +func BenchmarkParseHeaderList(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + parseHeaderList("header, second-header, THIRD-HEADER") + } +} + +func BenchmarkParseHeaderListSingle(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + parseHeaderList("header") + } +} + +func BenchmarkParseHeaderListNormalized(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + parseHeaderList("Header1, Header2, Third-Header") + } +} diff --git a/vendor/github.com/rs/xhandler/.travis.yml b/vendor/github.com/rs/xhandler/.travis.yml new file mode 100644 index 0000000000..b65c7a9f1e --- /dev/null +++ b/vendor/github.com/rs/xhandler/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: +- 1.5 +- tip +matrix: + allow_failures: + - go: tip diff --git a/vendor/github.com/rs/xhandler/chain_example_test.go b/vendor/github.com/rs/xhandler/chain_example_test.go new file mode 100644 index 0000000000..d0178e5805 --- /dev/null +++ b/vendor/github.com/rs/xhandler/chain_example_test.go @@ -0,0 +1,85 @@ +package xhandler_test + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/rs/cors" + "github.com/rs/xhandler" + "golang.org/x/net/context" +) + +func ExampleChain() { + c := xhandler.Chain{} + // Append a context-aware middleware handler + c.UseC(xhandler.CloseHandler) + + // Mix it with a non-context-aware middleware handler + c.Use(cors.Default().Handler) + + // Another context-aware middleware handler + c.UseC(xhandler.TimeoutHandler(2 * time.Second)) + + mux := http.NewServeMux() + + // Use c.Handler to terminate the chain with your final handler + mux.Handle("/", c.Handler(xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }))) + + // You can reuse the same chain for other handlers + mux.Handle("/api", c.Handler(xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the API!") + }))) +} + +func ExampleAddChain() { + c := 
xhandler.Chain{} + + close := xhandler.CloseHandler + cors := cors.Default().Handler + timeout := xhandler.TimeoutHandler(2 * time.Second) + auth := func(next xhandler.HandlerC) xhandler.HandlerC { + return xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + if v := ctx.Value("Authorization"); v == nil { + http.Error(w, "Not authorized", http.StatusUnauthorized) + return + } + next.ServeHTTPC(ctx, w, r) + }) + } + + c.Add(close, cors, timeout) + + mux := http.NewServeMux() + + // Use c.Handler to terminate the chain with your final handler + mux.Handle("/", c.Handler(xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }))) + + // Create a new chain from an existing one, and add route-specific middleware to it + protected := c.With(auth) + + mux.Handle("/admin", protected.Handler(xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "protected endpoint!") + }))) +} + +func ExampleIf() { + c := xhandler.Chain{} + + // Add a timeout handler only if the URL path matches a prefix + c.UseC(xhandler.If( + func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool { + return strings.HasPrefix(r.URL.Path, "/with-timeout/") + }, + xhandler.TimeoutHandler(2*time.Second), + )) + + http.Handle("/", c.Handler(xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }))) +} diff --git a/vendor/github.com/rs/xhandler/chain_test.go b/vendor/github.com/rs/xhandler/chain_test.go new file mode 100644 index 0000000000..acbd648975 --- /dev/null +++ b/vendor/github.com/rs/xhandler/chain_test.go @@ -0,0 +1,209 @@ +package xhandler + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestAppendHandlerC(t *testing.T) { + init := 0 + h1 := func(next HandlerC) HandlerC { + init++ + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + ctx = context.WithValue(ctx, "test", 1) + next.ServeHTTPC(ctx, w, r) + }) + } + h2 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + ctx = context.WithValue(ctx, "test", 2) + next.ServeHTTPC(ctx, w, r) + }) + } + c := Chain{} + c.UseC(h1) + c.UseC(h2) + assert.Len(t, c, 2) + + h := c.Handler(HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + // Test ordering + assert.Equal(t, 2, ctx.Value("test"), "second handler should overwrite first handler's context value") + })) + + h.ServeHTTP(nil, nil) + h.ServeHTTP(nil, nil) + assert.Equal(t, 1, init, "handler init called once") +} + +func TestAppendHandler(t *testing.T) { + init := 0 + h1 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + ctx = context.WithValue(ctx, "test", 1) + next.ServeHTTPC(ctx, w, r) + }) + } + h2 := func(next http.Handler) http.Handler { + init++ + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Change r and w values + w = httptest.NewRecorder() + r = &http.Request{} + next.ServeHTTP(w, r) + }) + } + c := Chain{} + c.UseC(h1) + c.Use(h2) + assert.Len(t, c, 2) + + h := c.Handler(HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + // Test ordering + assert.Equal(t, 1, ctx.Value("test"), + 
"the first handler value should be pass through the second (non-aware) one") + // Test r and w overwrite + assert.NotNil(t, w) + assert.NotNil(t, r) + })) + + h.ServeHTTP(nil, nil) + h.ServeHTTP(nil, nil) + // There's no safe way to not initialize non ctx aware handlers on each request :/ + //assert.Equal(t, 1, init, "handler init called once") +} + +func TestChainHandlerC(t *testing.T) { + handlerCalls := 0 + h1 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + ctx = context.WithValue(ctx, "test", 1) + next.ServeHTTPC(ctx, w, r) + }) + } + h2 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + ctx = context.WithValue(ctx, "test", 2) + next.ServeHTTPC(ctx, w, r) + }) + } + + c := Chain{} + c.UseC(h1) + c.UseC(h2) + h := c.HandlerC(HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + + assert.Equal(t, 2, ctx.Value("test"), + "second handler should overwrite first handler's context value") + assert.Equal(t, 1, ctx.Value("mainCtx"), + "the mainCtx value should be pass through") + })) + + mainCtx := context.WithValue(context.Background(), "mainCtx", 1) + h.ServeHTTPC(mainCtx, nil, nil) + + assert.Equal(t, 3, handlerCalls, "all handler called once") +} + +func TestAdd(t *testing.T) { + handlerCalls := 0 + h1 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + ctx = context.WithValue(ctx, "test", 1) + next.ServeHTTPC(ctx, w, r) + }) + } + h2 := func(next http.Handler) http.Handler { + handlerCalls++ + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Change r and w values + w = httptest.NewRecorder() + r = &http.Request{} + next.ServeHTTP(w, r) + }) + } + h3 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + ctx = context.WithValue(ctx, "test", 2) + next.ServeHTTPC(ctx, w, r) + }) + } + + c := Chain{} + c.Add(h1, h2, h3) + h := c.HandlerC(HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + + assert.Equal(t, 2, ctx.Value("test"), + "third handler should overwrite first handler's context value") + assert.Equal(t, 1, ctx.Value("mainCtx"), + "the mainCtx value should be pass through") + })) + + mainCtx := context.WithValue(context.Background(), "mainCtx", 1) + h.ServeHTTPC(mainCtx, nil, nil) + assert.Equal(t, 4, handlerCalls, "all handler called once") +} + +func TestWith(t *testing.T) { + handlerCalls := 0 + h1 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + ctx = context.WithValue(ctx, "test", 1) + next.ServeHTTPC(ctx, w, r) + }) + } + h2 := func(next http.Handler) http.Handler { + handlerCalls++ + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Change r and w values + w = httptest.NewRecorder() + r = &http.Request{} + next.ServeHTTP(w, r) + }) + } + h3 := func(next HandlerC) HandlerC { + return HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + ctx = context.WithValue(ctx, "test", 2) + next.ServeHTTPC(ctx, w, r) + }) + } + + c := Chain{} + c.Add(h1) + d := c.With(h2, h3) + + h := c.HandlerC(HandlerFuncC(func(ctx context.Context, w 
http.ResponseWriter, r *http.Request) { + handlerCalls++ + + assert.Equal(t, 1, ctx.Value("test"), + "third handler should not overwrite the first handler's context value") + assert.Equal(t, 1, ctx.Value("mainCtx"), + "the mainCtx value should be passed through") + })) + i := d.HandlerC(HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + handlerCalls++ + + assert.Equal(t, 2, ctx.Value("test"), + "third handler should overwrite first handler's context value") + assert.Equal(t, 1, ctx.Value("mainCtx"), + "the mainCtx value should be passed through") + })) + + mainCtx := context.WithValue(context.Background(), "mainCtx", 1) + h.ServeHTTPC(mainCtx, nil, nil) + assert.Equal(t, 2, handlerCalls, "all handlers called once") + handlerCalls = 0 + i.ServeHTTPC(mainCtx, nil, nil) + assert.Equal(t, 4, handlerCalls, "all handlers called once") +} diff --git a/vendor/github.com/rs/xhandler/middleware_test.go b/vendor/github.com/rs/xhandler/middleware_test.go new file mode 100644 index 0000000000..51306e390d --- /dev/null +++ b/vendor/github.com/rs/xhandler/middleware_test.go @@ -0,0 +1,88 @@ +package xhandler + +import ( + "log" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestTimeoutHandler(t *testing.T) { + ctx := context.WithValue(context.Background(), contextKey, "value") + xh := TimeoutHandler(time.Second)(&handler{}) + h := New(ctx, xh) + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + log.Fatal(err) + } + h.ServeHTTP(w, r) + assert.Equal(t, "value with deadline", w.Body.String()) +} + +type closeNotifyWriter struct { + *httptest.ResponseRecorder + closed bool +} + +func (w *closeNotifyWriter) CloseNotify() <-chan bool { + notify := make(chan bool, 1) + if w.closed { + // return an already "closed" notifier + notify <- true + } + return notify +} + +func TestCloseHandlerClientClose(t *testing.T) { + ctx := context.WithValue(context.Background(), contextKey, "value") + xh := CloseHandler(&handler{}) + h := New(ctx, xh) + w := &closeNotifyWriter{ResponseRecorder: httptest.NewRecorder(), closed: true} + r, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + log.Fatal(err) + } + h.ServeHTTP(w, r) + assert.Equal(t, "value canceled", w.Body.String()) +} + +func TestCloseHandlerRequestEnds(t *testing.T) { + ctx := context.WithValue(context.Background(), contextKey, "value") + xh := CloseHandler(&handler{}) + h := New(ctx, xh) + w := &closeNotifyWriter{ResponseRecorder: httptest.NewRecorder(), closed: false} + r, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + log.Fatal(err) + } + h.ServeHTTP(w, r) + assert.Equal(t, "value", w.Body.String()) +} + +func TestIf(t *testing.T) { + trueHandler := HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + assert.Equal(t, "/true", r.URL.Path) + }) + falseHandler := HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + assert.NotEqual(t, "/true", r.URL.Path) + }) + ctx := context.WithValue(context.Background(), contextKey, "value") + xh := If( + func(ctx context.Context, w http.ResponseWriter, r *http.Request) bool { + return r.URL.Path == "/true" + }, + func(next HandlerC) HandlerC { + return trueHandler + }, + )(falseHandler) + h := New(ctx, xh) + r, _ := http.NewRequest("GET", "http://example.com/true", nil) + h.ServeHTTP(nil, r) + r, _ = http.NewRequest("GET", 
"http://example.com/false", nil) + h.ServeHTTP(nil, r) +} diff --git a/vendor/github.com/rs/xhandler/xhandler_example_test.go b/vendor/github.com/rs/xhandler/xhandler_example_test.go new file mode 100644 index 0000000000..9f6f8a50f5 --- /dev/null +++ b/vendor/github.com/rs/xhandler/xhandler_example_test.go @@ -0,0 +1,67 @@ +package xhandler_test + +import ( + "log" + "net/http" + "time" + + "github.com/rs/xhandler" + "golang.org/x/net/context" +) + +type key int + +const contextKey key = 0 + +func newContext(ctx context.Context, value string) context.Context { + return context.WithValue(ctx, contextKey, value) +} + +func fromContext(ctx context.Context) (string, bool) { + value, ok := ctx.Value(contextKey).(string) + return value, ok +} + +func ExampleHandle() { + var xh xhandler.HandlerC + xh = xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + value, _ := fromContext(ctx) + w.Write([]byte("Hello " + value)) + }) + + xh = (func(next xhandler.HandlerC) xhandler.HandlerC { + return xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + ctx = newContext(ctx, "World") + next.ServeHTTPC(ctx, w, r) + }) + })(xh) + + ctx := context.Background() + // Bridge context aware handlers with http.Handler using xhandler.Handle() + http.Handle("/", xhandler.New(ctx, xh)) + + if err := http.ListenAndServe(":8080", nil); err != nil { + log.Fatal(err) + } +} + +func ExampleHandleTimeout() { + var xh xhandler.HandlerC + xh = xhandler.HandlerFuncC(func(ctx context.Context, w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Hello World")) + if _, ok := ctx.Deadline(); ok { + w.Write([]byte(" with deadline")) + } + }) + + // This handler adds a timeout to the handler + xh = xhandler.TimeoutHandler(5 * time.Second)(xh) + + ctx := context.Background() + // Bridge context aware handlers with http.Handler using xhandler.Handle() + http.Handle("/", xhandler.New(ctx, xh)) + + if err := http.ListenAndServe(":8080", nil); err != nil { + log.Fatal(err) + } +} diff --git a/vendor/github.com/rs/xhandler/xhandler_test.go b/vendor/github.com/rs/xhandler/xhandler_test.go new file mode 100644 index 0000000000..3f5021fa26 --- /dev/null +++ b/vendor/github.com/rs/xhandler/xhandler_test.go @@ -0,0 +1,61 @@ +package xhandler + +import ( + "log" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +type handler struct{} + +type key int + +const contextKey key = 0 + +func newContext(ctx context.Context, value string) context.Context { + return context.WithValue(ctx, contextKey, value) +} + +func fromContext(ctx context.Context) (string, bool) { + value, ok := ctx.Value(contextKey).(string) + return value, ok +} + +func (h handler) ServeHTTPC(ctx context.Context, w http.ResponseWriter, r *http.Request) { + // Leave other go routines a chance to run + time.Sleep(time.Nanosecond) + value, _ := fromContext(ctx) + if _, ok := ctx.Deadline(); ok { + value += " with deadline" + } + if ctx.Err() == context.Canceled { + value += " canceled" + } + w.Write([]byte(value)) +} + +func TestHandle(t *testing.T) { + ctx := context.WithValue(context.Background(), contextKey, "value") + h := New(ctx, &handler{}) + w := httptest.NewRecorder() + r, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + log.Fatal(err) + } + h.ServeHTTP(w, r) + assert.Equal(t, "value", w.Body.String()) +} + +func TestHandlerFunc(t *testing.T) { + ok := false + xh := 
HandlerFuncC(func(context.Context, http.ResponseWriter, *http.Request) { + ok = true + }) + xh.ServeHTTPC(nil, nil, nil) + assert.True(t, ok) +} diff --git a/vendor/github.com/sean-/seed/.gitignore b/vendor/github.com/sean-/seed/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/sean-/seed/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/sean-/seed/init_test.go b/vendor/github.com/sean-/seed/init_test.go new file mode 100644 index 0000000000..4736061f73 --- /dev/null +++ b/vendor/github.com/sean-/seed/init_test.go @@ -0,0 +1,26 @@ +package seed_test + +import ( + "testing" + + "github.com/sean-/seed" +) + +func TestInit(t *testing.T) { + secure, err := seed.Init() + if !secure { + t.Fatalf("Failed to securely seed: %v", err) + } +} + +func TestMustInit(t *testing.T) { + seed.MustInit() + + if !seed.Seeded() { + t.Fatalf("MustInit() failed to seed") + } + + if !seed.Secure() { + t.Fatalf("MustInit() failed to securely seed") + } +} diff --git a/vendor/github.com/sergi/go-diff/.gitignore b/vendor/github.com/sergi/go-diff/.gitignore new file mode 100644 index 0000000000..00268614f0 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/sergi/go-diff/.travis.yml b/vendor/github.com/sergi/go-diff/.travis.yml new file mode 100644 index 0000000000..4a22ed3573 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/.travis.yml @@ -0,0 +1,27 @@ +language: go + +os: + - linux + - osx + +go: + - 1.7.x + - 1.8.x + +sudo: false + +env: + global: + # Coveralls.io + - secure: OGYOsFNXNarEZ5yA4/M6ZdVguD0jL8vXgXrbLzjcpkKcq8ObHSCtNINoUlnNf6l6Z92kPnuV+LSm7jKTojBlov4IwgiY1ACbvg921SdjxYkg1AiwHTRTLR1g/esX8RdaBpJ0TOcXOFFsYMRVvl5sxxtb0tXSuUrT+Ch4SUCY7X8= + +install: + - make install-dependencies + - make install-tools + - make install + +script: + - make lint + - make test-with-coverage + - gover + - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi diff --git a/vendor/github.com/sergi/go-diff/APACHE-LICENSE-2.0 b/vendor/github.com/sergi/go-diff/APACHE-LICENSE-2.0 new file mode 100644 index 0000000000..f433b1a53f --- /dev/null +++ b/vendor/github.com/sergi/go-diff/APACHE-LICENSE-2.0 @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/sergi/go-diff/AUTHORS b/vendor/github.com/sergi/go-diff/AUTHORS new file mode 100644 index 0000000000..2d7bb2bf57 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/AUTHORS @@ -0,0 +1,25 @@ +# This is the official list of go-diff authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/CONTRIBUTORS b/vendor/github.com/sergi/go-diff/CONTRIBUTORS new file mode 100644 index 0000000000..369e3d5519 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/CONTRIBUTORS @@ -0,0 +1,32 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the go-diff +# repository. +# +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, ACME Inc. 
employees would be listed here +# but not in AUTHORS, because ACME Inc. would hold the copyright. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file. +# +# Names should be added to this file like so: +# Name +# +# Please keep the list sorted. + +Danny Yoo +James Kolb +Jonathan Amsterdam +Markus Zimmermann +Matt Kovars +Örjan Persson +Osman Masood +Robert Carlsen +Rory Flynn +Sergi Mansilla +Shatrugna Sadhu +Shawn Smith +Stas Maksimov +Tor Arvid Lund +Zac Bergquist diff --git a/vendor/github.com/sergi/go-diff/Makefile b/vendor/github.com/sergi/go-diff/Makefile new file mode 100644 index 0000000000..e013f0b31c --- /dev/null +++ b/vendor/github.com/sergi/go-diff/Makefile @@ -0,0 +1,44 @@ +.PHONY: all clean clean-coverage install install-dependencies install-tools lint test test-verbose test-with-coverage + +export ARGS := $(wordlist 2,$(words $(MAKECMDGOALS)),$(MAKECMDGOALS)) +export PKG := github.com/sergi/go-diff +export ROOT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST)))) + +$(eval $(ARGS):;@:) # turn arguments into do-nothing targets +export ARGS + +ifdef ARGS + PKG_TEST := $(ARGS) +else + PKG_TEST := $(PKG)/... +endif + +all: install-tools install-dependencies install lint test + +clean: + go clean -i $(PKG)/... + go clean -i -race $(PKG)/... +clean-coverage: + find $(ROOT_DIR) | grep .coverprofile | xargs rm +install: + go install -v $(PKG)/... +install-dependencies: + go get -t -v $(PKG)/... + go build -v $(PKG)/... +install-tools: + # Install linting tools + go get -u -v github.com/golang/lint/... + go get -u -v github.com/kisielk/errcheck/... + + # Install code coverage tools + go get -u -v github.com/onsi/ginkgo/ginkgo/... + go get -u -v github.com/modocache/gover/... + go get -u -v github.com/mattn/goveralls/... +lint: + $(ROOT_DIR)/scripts/lint.sh +test: + go test -race -test.timeout 120s $(PKG_TEST) +test-verbose: + go test -race -test.timeout 120s -v $(PKG_TEST) +test-with-coverage: + ginkgo -r -cover -race -skipPackage="testdata" diff --git a/vendor/github.com/sergi/go-diff/README.md b/vendor/github.com/sergi/go-diff/README.md new file mode 100644 index 0000000000..597437bc75 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/README.md @@ -0,0 +1,84 @@ +# go-diff [![GoDoc](https://godoc.org/github.com/sergi/go-diff?status.png)](https://godoc.org/github.com/sergi/go-diff/diffmatchpatch) [![Build Status](https://travis-ci.org/sergi/go-diff.svg?branch=master)](https://travis-ci.org/sergi/go-diff) [![Coverage Status](https://coveralls.io/repos/sergi/go-diff/badge.png?branch=master)](https://coveralls.io/r/sergi/go-diff?branch=master) + +go-diff offers algorithms to perform operations required for synchronizing plain text: + +- Compare two texts and return their differences. +- Perform fuzzy matching of text. +- Apply patches onto text. + +## Installation + +```bash +go get -u github.com/sergi/go-diff/... +``` + +## Usage + +The following example compares two texts and writes out the differences to standard output. + +```go +package main + +import ( + "fmt" + + "github.com/sergi/go-diff/diffmatchpatch" +) + +const ( + text1 = "Lorem ipsum dolor." + text2 = "Lorem dolor sit amet." +) + +func main() { + dmp := diffmatchpatch.New() + + diffs := dmp.DiffMain(text1, text2, false) + + fmt.Println(dmp.DiffPrettyText(diffs)) +} +``` + +## Found a bug or are you missing a feature in go-diff? + +Please make sure to have the latest version of go-diff. 
If the problem still persists, go through the [open issues](https://github.com/sergi/go-diff/issues) in the tracker first. If you cannot find your request, just open up a [new issue](https://github.com/sergi/go-diff/issues/new). + +## How to contribute? + +You want to contribute to go-diff? GREAT! If you are here because of a bug you want to fix or a feature you want to add, you can just read on. Otherwise, we have a list of [open issues in the tracker](https://github.com/sergi/go-diff/issues). Just choose something you think you can work on and discuss your plans in the issue by commenting on it. + +Please make sure that every behavioral change is accompanied by test cases. Additionally, every contribution must pass the `lint` and `test` Makefile targets which can be run using the following commands in the repository root directory. + +```bash +make lint +make test +``` + +After your contribution passes these commands, [create a PR](https://help.github.com/articles/creating-a-pull-request/) and we will review your contribution. + +## Origins + +go-diff is a Go language port of Neil Fraser's google-diff-match-patch code. His original code is available at [http://code.google.com/p/google-diff-match-patch/](http://code.google.com/p/google-diff-match-patch/). + +## Copyright and License + +The original Google Diff, Match and Patch Library is licensed under the [Apache License 2.0](http://www.apache.org/licenses/LICENSE-2.0). The full terms of that license are included here in the [APACHE-LICENSE-2.0](/APACHE-LICENSE-2.0) file. + +Diff, Match and Patch Library + +> Written by Neil Fraser +> Copyright (c) 2006 Google Inc. +> + +This Go version of Diff, Match and Patch Library is licensed under the [MIT License](http://www.opensource.org/licenses/MIT) (a.k.a. the Expat License) which is included here in the [LICENSE](/LICENSE) file. + +Go version of Diff, Match and Patch Library + +> Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +> + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/benchutil_test.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/benchutil_test.go new file mode 100644 index 0000000000..b8e404d045 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/benchutil_test.go @@ -0,0 +1,28 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. 
+// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "io/ioutil" +) + +const testdataPath = "../testdata/" + +func speedtestTexts() (s1 string, s2 string) { + d1, err := ioutil.ReadFile(testdataPath + "speedtest1.txt") + if err != nil { + panic(err) + } + d2, err := ioutil.ReadFile(testdataPath + "speedtest2.txt") + if err != nil { + panic(err) + } + + return string(d1), string(d2) +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff_test.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff_test.go new file mode 100644 index 0000000000..b52bd70166 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff_test.go @@ -0,0 +1,1427 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "testing" + "time" + "unicode/utf8" + + "github.com/stretchr/testify/assert" +) + +func pretty(diffs []Diff) string { + var w bytes.Buffer + + for i, diff := range diffs { + _, _ = w.WriteString(fmt.Sprintf("%v. ", i)) + + switch diff.Type { + case DiffInsert: + _, _ = w.WriteString("DiffIns") + case DiffDelete: + _, _ = w.WriteString("DiffDel") + case DiffEqual: + _, _ = w.WriteString("DiffEql") + default: + _, _ = w.WriteString("Unknown") + } + + _, _ = w.WriteString(fmt.Sprintf(": %v\n", diff.Text)) + } + + return w.String() +} + +func diffRebuildTexts(diffs []Diff) []string { + texts := []string{"", ""} + + for _, d := range diffs { + if d.Type != DiffInsert { + texts[0] += d.Text + } + if d.Type != DiffDelete { + texts[1] += d.Text + } + } + + return texts +} + +func TestDiffCommonPrefix(t *testing.T) { + type TestCase struct { + Name string + + Text1 string + Text2 string + + Expected int + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Null", "abc", "xyz", 0}, + {"Non-null", "1234abcdef", "1234xyz", 4}, + {"Whole", "1234", "1234xyz", 4}, + } { + actual := dmp.DiffCommonPrefix(tc.Text1, tc.Text2) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func BenchmarkDiffCommonPrefix(b *testing.B) { + s := "ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ" + + dmp := New() + + for i := 0; i < b.N; i++ { + dmp.DiffCommonPrefix(s, s) + } +} + +func TestCommonPrefixLength(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + + Expected int + } + + for i, tc := range []TestCase{ + {"abc", "xyz", 0}, + {"1234abcdef", "1234xyz", 4}, + {"1234", "1234xyz", 4}, + } { + actual := commonPrefixLength([]rune(tc.Text1), []rune(tc.Text2)) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestDiffCommonSuffix(t *testing.T) { + type TestCase struct { + Name string + + Text1 string + Text2 string + + Expected int + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Null", "abc", "xyz", 0}, + {"Non-null", "abcdef1234", "xyz1234", 4}, + {"Whole", "1234", "xyz1234", 4}, + } { + actual := dmp.DiffCommonSuffix(tc.Text1, tc.Text2) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func 
BenchmarkDiffCommonSuffix(b *testing.B) { + s := "ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖ" + + dmp := New() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + dmp.DiffCommonSuffix(s, s) + } +} + +func TestCommonSuffixLength(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + + Expected int + } + + for i, tc := range []TestCase{ + {"abc", "xyz", 0}, + {"abcdef1234", "xyz1234", 4}, + {"1234", "xyz1234", 4}, + {"123", "a3", 1}, + } { + actual := commonSuffixLength([]rune(tc.Text1), []rune(tc.Text2)) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestDiffCommonOverlap(t *testing.T) { + type TestCase struct { + Name string + + Text1 string + Text2 string + + Expected int + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Null", "", "abcd", 0}, + {"Whole", "abc", "abcd", 3}, + {"Null", "123456", "abcd", 0}, + {"Null", "123456xxx", "xxxabcd", 3}, + // Some overly clever languages (C#) may treat ligatures as equal to their component letters, e.g. U+FB01 == 'fi' + {"Unicode", "fi", "\ufb01i", 0}, + } { + actual := dmp.DiffCommonOverlap(tc.Text1, tc.Text2) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestDiffHalfMatch(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + + Expected []string + } + + dmp := New() + dmp.DiffTimeout = 1 + + for i, tc := range []TestCase{ + // No match + {"1234567890", "abcdef", nil}, + {"12345", "23", nil}, + + // Single Match + {"1234567890", "a345678z", []string{"12", "90", "a", "z", "345678"}}, + {"a345678z", "1234567890", []string{"a", "z", "12", "90", "345678"}}, + {"abc56789z", "1234567890", []string{"abc", "z", "1234", "0", "56789"}}, + {"a23456xyz", "1234567890", []string{"a", "xyz", "1", "7890", "23456"}}, + + // Multiple Matches + {"121231234123451234123121", "a1234123451234z", []string{"12123", "123121", "a", "z", "1234123451234"}}, + {"x-=-=-=-=-=-=-=-=-=-=-=-=", "xx-=-=-=-=-=-=-=", []string{"", "-=-=-=-=-=", "x", "", "x-=-=-=-=-=-=-="}}, + {"-=-=-=-=-=-=-=-=-=-=-=-=y", "-=-=-=-=-=-=-=yy", []string{"-=-=-=-=-=", "", "", "y", "-=-=-=-=-=-=-=y"}}, + + // Non-optimal halfmatch. Optimal diff would be -q+x=H-i+e=lloHe+Hu=llo-Hew+y not -qHillo+x=HelloHe-w+Hulloy + {"qHilloHelloHew", "xHelloHeHulloy", []string{"qHillo", "w", "x", "Hulloy", "HelloHe"}}, + } { + actual := dmp.DiffHalfMatch(tc.Text1, tc.Text2) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + + dmp.DiffTimeout = 0 + + for i, tc := range []TestCase{ + // Optimal no halfmatch + {"qHilloHelloHew", "xHelloHeHulloy", nil}, + } { + actual := dmp.DiffHalfMatch(tc.Text1, tc.Text2) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func BenchmarkDiffHalfMatch(b *testing.B) { + s1, s2 := speedtestTexts() + + dmp := New() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + dmp.DiffHalfMatch(s1, s2) + } +} + +func TestDiffBisectSplit(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + } + + dmp := New() + + for _, tc := range []TestCase{ + {"STUV\x05WX\x05YZ\x05[", "WĺĻļ\x05YZ\x05ĽľĿŀZ"}, + } { + diffs := dmp.diffBisectSplit([]rune(tc.Text1), + []rune(tc.Text2), 7, 6, time.Now().Add(time.Hour)) + + for _, d := range diffs { + assert.True(t, utf8.ValidString(d.Text)) + } + + // TODO define the expected outcome + } +} + +func TestDiffLinesToChars(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + + ExpectedChars1 string + ExpectedChars2 string + 
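// NOTE (editor's addition, not upstream code): ExpectedLines is the shared line table that both encoded char strings index into. +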
ExpectedLines []string + } + + dmp := New() + + for i, tc := range []TestCase{ + {"", "alpha\r\nbeta\r\n\r\n\r\n", "", "\u0001\u0002\u0003\u0003", []string{"", "alpha\r\n", "beta\r\n", "\r\n"}}, + {"a", "b", "\u0001", "\u0002", []string{"", "a", "b"}}, + // Omit final newline. + {"alpha\nbeta\nalpha", "", "\u0001\u0002\u0003", "", []string{"", "alpha\n", "beta\n", "alpha"}}, + } { + actualChars1, actualChars2, actualLines := dmp.DiffLinesToChars(tc.Text1, tc.Text2) + assert.Equal(t, tc.ExpectedChars1, actualChars1, fmt.Sprintf("Test case #%d, %#v", i, tc)) + assert.Equal(t, tc.ExpectedChars2, actualChars2, fmt.Sprintf("Test case #%d, %#v", i, tc)) + assert.Equal(t, tc.ExpectedLines, actualLines, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + + // More than 256 to reveal any 8-bit limitations. + n := 300 + lineList := []string{ + "", // Account for the initial empty element of the lines array. + } + var charList []rune + for x := 1; x < n+1; x++ { + lineList = append(lineList, strconv.Itoa(x)+"\n") + charList = append(charList, rune(x)) + } + lines := strings.Join(lineList, "") + chars := string(charList) + assert.Equal(t, n, utf8.RuneCountInString(chars)) + + actualChars1, actualChars2, actualLines := dmp.DiffLinesToChars(lines, "") + assert.Equal(t, chars, actualChars1) + assert.Equal(t, "", actualChars2) + assert.Equal(t, lineList, actualLines) +} + +func TestDiffCharsToLines(t *testing.T) { + type TestCase struct { + Diffs []Diff + Lines []string + + Expected []Diff + } + + dmp := New() + + for i, tc := range []TestCase{ + { + Diffs: []Diff{ + {DiffEqual, "\u0001\u0002\u0001"}, + {DiffInsert, "\u0002\u0001\u0002"}, + }, + Lines: []string{"", "alpha\n", "beta\n"}, + + Expected: []Diff{ + {DiffEqual, "alpha\nbeta\nalpha\n"}, + {DiffInsert, "beta\nalpha\nbeta\n"}, + }, + }, + } { + actual := dmp.DiffCharsToLines(tc.Diffs, tc.Lines) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + + // More than 256 to reveal any 8-bit limitations. + n := 300 + lineList := []string{ + "", // Account for the initial empty element of the lines array. 
+ } + charList := []rune{} + for x := 1; x <= n; x++ { + lineList = append(lineList, strconv.Itoa(x)+"\n") + charList = append(charList, rune(x)) + } + assert.Equal(t, n, len(charList)) + + actual := dmp.DiffCharsToLines([]Diff{Diff{DiffDelete, string(charList)}}, lineList) + assert.Equal(t, []Diff{Diff{DiffDelete, strings.Join(lineList, "")}}, actual) +} + +func TestDiffCleanupMerge(t *testing.T) { + type TestCase struct { + Name string + + Diffs []Diff + + Expected []Diff + } + + dmp := New() + + for i, tc := range []TestCase{ + { + "Null case", + []Diff{}, + []Diff{}, + }, + { + "No Diff case", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffDelete, "b"}, Diff{DiffInsert, "c"}}, + []Diff{Diff{DiffEqual, "a"}, Diff{DiffDelete, "b"}, Diff{DiffInsert, "c"}}, + }, + { + "Merge equalities", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffEqual, "b"}, Diff{DiffEqual, "c"}}, + []Diff{Diff{DiffEqual, "abc"}}, + }, + { + "Merge deletions", + []Diff{Diff{DiffDelete, "a"}, Diff{DiffDelete, "b"}, Diff{DiffDelete, "c"}}, + []Diff{Diff{DiffDelete, "abc"}}, + }, + { + "Merge insertions", + []Diff{Diff{DiffInsert, "a"}, Diff{DiffInsert, "b"}, Diff{DiffInsert, "c"}}, + []Diff{Diff{DiffInsert, "abc"}}, + }, + { + "Merge interweave", + []Diff{Diff{DiffDelete, "a"}, Diff{DiffInsert, "b"}, Diff{DiffDelete, "c"}, Diff{DiffInsert, "d"}, Diff{DiffEqual, "e"}, Diff{DiffEqual, "f"}}, + []Diff{Diff{DiffDelete, "ac"}, Diff{DiffInsert, "bd"}, Diff{DiffEqual, "ef"}}, + }, + { + "Prefix and suffix detection", + []Diff{Diff{DiffDelete, "a"}, Diff{DiffInsert, "abc"}, Diff{DiffDelete, "dc"}}, + []Diff{Diff{DiffEqual, "a"}, Diff{DiffDelete, "d"}, Diff{DiffInsert, "b"}, Diff{DiffEqual, "c"}}, + }, + { + "Prefix and suffix detection with equalities", + []Diff{Diff{DiffEqual, "x"}, Diff{DiffDelete, "a"}, Diff{DiffInsert, "abc"}, Diff{DiffDelete, "dc"}, Diff{DiffEqual, "y"}}, + []Diff{Diff{DiffEqual, "xa"}, Diff{DiffDelete, "d"}, Diff{DiffInsert, "b"}, Diff{DiffEqual, "cy"}}, + }, + { + "Same test as above but with unicode (\u0101 will appear in diffs with at least 257 unique lines)", + []Diff{Diff{DiffEqual, "x"}, Diff{DiffDelete, "\u0101"}, Diff{DiffInsert, "\u0101bc"}, Diff{DiffDelete, "dc"}, Diff{DiffEqual, "y"}}, + []Diff{Diff{DiffEqual, "x\u0101"}, Diff{DiffDelete, "d"}, Diff{DiffInsert, "b"}, Diff{DiffEqual, "cy"}}, + }, + { + "Slide edit left", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffInsert, "ba"}, Diff{DiffEqual, "c"}}, + []Diff{Diff{DiffInsert, "ab"}, Diff{DiffEqual, "ac"}}, + }, + { + "Slide edit right", + []Diff{Diff{DiffEqual, "c"}, Diff{DiffInsert, "ab"}, Diff{DiffEqual, "a"}}, + []Diff{Diff{DiffEqual, "ca"}, Diff{DiffInsert, "ba"}}, + }, + { + "Slide edit left recursive", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffDelete, "b"}, Diff{DiffEqual, "c"}, Diff{DiffDelete, "ac"}, Diff{DiffEqual, "x"}}, + []Diff{Diff{DiffDelete, "abc"}, Diff{DiffEqual, "acx"}}, + }, + { + "Slide edit right recursive", + []Diff{Diff{DiffEqual, "x"}, Diff{DiffDelete, "ca"}, Diff{DiffEqual, "c"}, Diff{DiffDelete, "b"}, Diff{DiffEqual, "a"}}, + []Diff{Diff{DiffEqual, "xca"}, Diff{DiffDelete, "cba"}}, + }, + } { + actual := dmp.DiffCleanupMerge(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestDiffCleanupSemanticLossless(t *testing.T) { + type TestCase struct { + Name string + + Diffs []Diff + + Expected []Diff + } + + dmp := New() + + for i, tc := range []TestCase{ + { + "Null case", + []Diff{}, + []Diff{}, + }, + { + "Blank lines", + []Diff{ + Diff{DiffEqual, "AAA\r\n\r\nBBB"}, + 
Diff{DiffInsert, "\r\nDDD\r\n\r\nBBB"}, + Diff{DiffEqual, "\r\nEEE"}, + }, + []Diff{ + Diff{DiffEqual, "AAA\r\n\r\n"}, + Diff{DiffInsert, "BBB\r\nDDD\r\n\r\n"}, + Diff{DiffEqual, "BBB\r\nEEE"}, + }, + }, + { + "Line boundaries", + []Diff{ + Diff{DiffEqual, "AAA\r\nBBB"}, + Diff{DiffInsert, " DDD\r\nBBB"}, + Diff{DiffEqual, " EEE"}, + }, + []Diff{ + Diff{DiffEqual, "AAA\r\n"}, + Diff{DiffInsert, "BBB DDD\r\n"}, + Diff{DiffEqual, "BBB EEE"}, + }, + }, + { + "Word boundaries", + []Diff{ + Diff{DiffEqual, "The c"}, + Diff{DiffInsert, "ow and the c"}, + Diff{DiffEqual, "at."}, + }, + []Diff{ + Diff{DiffEqual, "The "}, + Diff{DiffInsert, "cow and the "}, + Diff{DiffEqual, "cat."}, + }, + }, + { + "Alphanumeric boundaries", + []Diff{ + Diff{DiffEqual, "The-c"}, + Diff{DiffInsert, "ow-and-the-c"}, + Diff{DiffEqual, "at."}, + }, + []Diff{ + Diff{DiffEqual, "The-"}, + Diff{DiffInsert, "cow-and-the-"}, + Diff{DiffEqual, "cat."}, + }, + }, + { + "Hitting the start", + []Diff{ + Diff{DiffEqual, "a"}, + Diff{DiffDelete, "a"}, + Diff{DiffEqual, "ax"}, + }, + []Diff{ + Diff{DiffDelete, "a"}, + Diff{DiffEqual, "aax"}, + }, + }, + { + "Hitting the end", + []Diff{ + Diff{DiffEqual, "xa"}, + Diff{DiffDelete, "a"}, + Diff{DiffEqual, "a"}, + }, + []Diff{ + Diff{DiffEqual, "xaa"}, + Diff{DiffDelete, "a"}, + }, + }, + { + "Sentence boundaries", + []Diff{ + Diff{DiffEqual, "The xxx. The "}, + Diff{DiffInsert, "zzz. The "}, + Diff{DiffEqual, "yyy."}, + }, + []Diff{ + Diff{DiffEqual, "The xxx."}, + Diff{DiffInsert, " The zzz."}, + Diff{DiffEqual, " The yyy."}, + }, + }, + { + "UTF-8 strings", + []Diff{ + Diff{DiffEqual, "The ♕. The "}, + Diff{DiffInsert, "♔. The "}, + Diff{DiffEqual, "♖."}, + }, + []Diff{ + Diff{DiffEqual, "The ♕."}, + Diff{DiffInsert, " The ♔."}, + Diff{DiffEqual, " The ♖."}, + }, + }, + { + "Rune boundaries", + []Diff{ + Diff{DiffEqual, "♕♕"}, + Diff{DiffInsert, "♔♔"}, + Diff{DiffEqual, "♖♖"}, + }, + []Diff{ + Diff{DiffEqual, "♕♕"}, + Diff{DiffInsert, "♔♔"}, + Diff{DiffEqual, "♖♖"}, + }, + }, + } { + actual := dmp.DiffCleanupSemanticLossless(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestDiffCleanupSemantic(t *testing.T) { + type TestCase struct { + Name string + + Diffs []Diff + + Expected []Diff + } + + dmp := New() + + for i, tc := range []TestCase{ + { + "Null case", + []Diff{}, + []Diff{}, + }, + { + "No elimination #1", + []Diff{ + {DiffDelete, "ab"}, + {DiffInsert, "cd"}, + {DiffEqual, "12"}, + {DiffDelete, "e"}, + }, + []Diff{ + {DiffDelete, "ab"}, + {DiffInsert, "cd"}, + {DiffEqual, "12"}, + {DiffDelete, "e"}, + }, + }, + { + "No elimination #2", + []Diff{ + {DiffDelete, "abc"}, + {DiffInsert, "ABC"}, + {DiffEqual, "1234"}, + {DiffDelete, "wxyz"}, + }, + []Diff{ + {DiffDelete, "abc"}, + {DiffInsert, "ABC"}, + {DiffEqual, "1234"}, + {DiffDelete, "wxyz"}, + }, + }, + { + "No elimination #3", + []Diff{ + {DiffEqual, "2016-09-01T03:07:1"}, + {DiffInsert, "5.15"}, + {DiffEqual, "4"}, + {DiffDelete, "."}, + {DiffEqual, "80"}, + {DiffInsert, "0"}, + {DiffEqual, "78"}, + {DiffDelete, "3074"}, + {DiffEqual, "1Z"}, + }, + []Diff{ + {DiffEqual, "2016-09-01T03:07:1"}, + {DiffInsert, "5.15"}, + {DiffEqual, "4"}, + {DiffDelete, "."}, + {DiffEqual, "80"}, + {DiffInsert, "0"}, + {DiffEqual, "78"}, + {DiffDelete, "3074"}, + {DiffEqual, "1Z"}, + }, + }, + { + "Simple elimination", + []Diff{ + {DiffDelete, "a"}, + {DiffEqual, "b"}, + {DiffDelete, "c"}, + }, + []Diff{ + {DiffDelete, "abc"}, + {DiffInsert, "b"}, + }, + }, + { + "Backpass 
elimination", + []Diff{ + {DiffDelete, "ab"}, + {DiffEqual, "cd"}, + {DiffDelete, "e"}, + {DiffEqual, "f"}, + {DiffInsert, "g"}, + }, + []Diff{ + {DiffDelete, "abcdef"}, + {DiffInsert, "cdfg"}, + }, + }, + { + "Multiple eliminations", + []Diff{ + {DiffInsert, "1"}, + {DiffEqual, "A"}, + {DiffDelete, "B"}, + {DiffInsert, "2"}, + {DiffEqual, "_"}, + {DiffInsert, "1"}, + {DiffEqual, "A"}, + {DiffDelete, "B"}, + {DiffInsert, "2"}, + }, + []Diff{ + {DiffDelete, "AB_AB"}, + {DiffInsert, "1A2_1A2"}, + }, + }, + { + "Word boundaries", + []Diff{ + {DiffEqual, "The c"}, + {DiffDelete, "ow and the c"}, + {DiffEqual, "at."}, + }, + []Diff{ + {DiffEqual, "The "}, + {DiffDelete, "cow and the "}, + {DiffEqual, "cat."}, + }, + }, + { + "No overlap elimination", + []Diff{ + {DiffDelete, "abcxx"}, + {DiffInsert, "xxdef"}, + }, + []Diff{ + {DiffDelete, "abcxx"}, + {DiffInsert, "xxdef"}, + }, + }, + { + "Overlap elimination", + []Diff{ + {DiffDelete, "abcxxx"}, + {DiffInsert, "xxxdef"}, + }, + []Diff{ + {DiffDelete, "abc"}, + {DiffEqual, "xxx"}, + {DiffInsert, "def"}, + }, + }, + { + "Reverse overlap elimination", + []Diff{ + {DiffDelete, "xxxabc"}, + {DiffInsert, "defxxx"}, + }, + []Diff{ + {DiffInsert, "def"}, + {DiffEqual, "xxx"}, + {DiffDelete, "abc"}, + }, + }, + { + "Two overlap eliminations", + []Diff{ + {DiffDelete, "abcd1212"}, + {DiffInsert, "1212efghi"}, + {DiffEqual, "----"}, + {DiffDelete, "A3"}, + {DiffInsert, "3BC"}, + }, + []Diff{ + {DiffDelete, "abcd"}, + {DiffEqual, "1212"}, + {DiffInsert, "efghi"}, + {DiffEqual, "----"}, + {DiffDelete, "A"}, + {DiffEqual, "3"}, + {DiffInsert, "BC"}, + }, + }, + { + "Test case for adapting DiffCleanupSemantic to be equal to the Python version #19", + []Diff{ + {DiffEqual, "James McCarthy "}, + {DiffDelete, "close to "}, + {DiffEqual, "sign"}, + {DiffDelete, "ing"}, + {DiffInsert, "s"}, + {DiffEqual, " new "}, + {DiffDelete, "E"}, + {DiffInsert, "fi"}, + {DiffEqual, "ve"}, + {DiffInsert, "-yea"}, + {DiffEqual, "r"}, + {DiffDelete, "ton"}, + {DiffEqual, " deal"}, + {DiffInsert, " at Everton"}, + }, + []Diff{ + {DiffEqual, "James McCarthy "}, + {DiffDelete, "close to "}, + {DiffEqual, "sign"}, + {DiffDelete, "ing"}, + {DiffInsert, "s"}, + {DiffEqual, " new "}, + {DiffInsert, "five-year deal at "}, + {DiffEqual, "Everton"}, + {DiffDelete, " deal"}, + }, + }, + } { + actual := dmp.DiffCleanupSemantic(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func BenchmarkDiffCleanupSemantic(b *testing.B) { + s1, s2 := speedtestTexts() + + dmp := New() + + diffs := dmp.DiffMain(s1, s2, false) + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + dmp.DiffCleanupSemantic(diffs) + } +} + +func TestDiffCleanupEfficiency(t *testing.T) { + type TestCase struct { + Name string + + Diffs []Diff + + Expected []Diff + } + + dmp := New() + dmp.DiffEditCost = 4 + + for i, tc := range []TestCase{ + { + "Null case", + []Diff{}, + []Diff{}, + }, + { + "No elimination", + []Diff{ + Diff{DiffDelete, "ab"}, + Diff{DiffInsert, "12"}, + Diff{DiffEqual, "wxyz"}, + Diff{DiffDelete, "cd"}, + Diff{DiffInsert, "34"}, + }, + []Diff{ + Diff{DiffDelete, "ab"}, + Diff{DiffInsert, "12"}, + Diff{DiffEqual, "wxyz"}, + Diff{DiffDelete, "cd"}, + Diff{DiffInsert, "34"}, + }, + }, + { + "Four-edit elimination", + []Diff{ + Diff{DiffDelete, "ab"}, + Diff{DiffInsert, "12"}, + Diff{DiffEqual, "xyz"}, + Diff{DiffDelete, "cd"}, + Diff{DiffInsert, "34"}, + }, + []Diff{ + Diff{DiffDelete, "abxyzcd"}, + Diff{DiffInsert, "12xyz34"}, + }, + }, + { + "Three-edit 
elimination", + []Diff{ + Diff{DiffInsert, "12"}, + Diff{DiffEqual, "x"}, + Diff{DiffDelete, "cd"}, + Diff{DiffInsert, "34"}, + }, + []Diff{ + Diff{DiffDelete, "xcd"}, + Diff{DiffInsert, "12x34"}, + }, + }, + { + "Backpass elimination", + []Diff{ + Diff{DiffDelete, "ab"}, + Diff{DiffInsert, "12"}, + Diff{DiffEqual, "xy"}, + Diff{DiffInsert, "34"}, + Diff{DiffEqual, "z"}, + Diff{DiffDelete, "cd"}, + Diff{DiffInsert, "56"}, + }, + []Diff{ + Diff{DiffDelete, "abxyzcd"}, + Diff{DiffInsert, "12xy34z56"}, + }, + }, + } { + actual := dmp.DiffCleanupEfficiency(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.DiffEditCost = 5 + + for i, tc := range []TestCase{ + { + "High cost elimination", + []Diff{ + Diff{DiffDelete, "ab"}, + Diff{DiffInsert, "12"}, + Diff{DiffEqual, "wxyz"}, + Diff{DiffDelete, "cd"}, + Diff{DiffInsert, "34"}, + }, + []Diff{ + Diff{DiffDelete, "abwxyzcd"}, + Diff{DiffInsert, "12wxyz34"}, + }, + }, + } { + actual := dmp.DiffCleanupEfficiency(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestDiffPrettyHtml(t *testing.T) { + type TestCase struct { + Diffs []Diff + + Expected string + } + + dmp := New() + + for i, tc := range []TestCase{ + { + Diffs: []Diff{ + {DiffEqual, "a\n"}, + {DiffDelete, "b"}, + {DiffInsert, "c&d"}, + }, + + Expected: "
<span>a&para;<br></span><del style=\"background:#ffe6e6;\">b</del><ins style=\"background:#e6ffe6;\">c&amp;d</ins>", + }, + } { + actual := dmp.DiffPrettyHtml(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestDiffPrettyText(t *testing.T) { + type TestCase struct { + Diffs []Diff + + Expected string + } + + dmp := New() + + for i, tc := range []TestCase{ + { + Diffs: []Diff{ + {DiffEqual, "a\n"}, + {DiffDelete, "b"}, + {DiffInsert, "c&d"}, + }, + + Expected: "a\n\x1b[31mb\x1b[0m\x1b[32mc&d\x1b[0m", + }, + } { + actual := dmp.DiffPrettyText(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestDiffText(t *testing.T) { + type TestCase struct { + Diffs []Diff + + ExpectedText1 string + ExpectedText2 string + } + + dmp := New() + + for i, tc := range []TestCase{ + { + Diffs: []Diff{ + {DiffEqual, "jump"}, + {DiffDelete, "s"}, + {DiffInsert, "ed"}, + {DiffEqual, " over "}, + {DiffDelete, "the"}, + {DiffInsert, "a"}, + {DiffEqual, " lazy"}, + }, + + ExpectedText1: "jumps over the lazy", + ExpectedText2: "jumped over a lazy", + }, + } { + actualText1 := dmp.DiffText1(tc.Diffs) + assert.Equal(t, tc.ExpectedText1, actualText1, fmt.Sprintf("Test case #%d, %#v", i, tc)) + + actualText2 := dmp.DiffText2(tc.Diffs) + assert.Equal(t, tc.ExpectedText2, actualText2, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestDiffDelta(t *testing.T) { + type TestCase struct { + Name string + + Text string + Delta string + + ErrorMessagePrefix string + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Delta shorter than text", "jumps over the lazyx", "=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", "Delta length (19) is different from source text length (20)"}, + {"Delta longer than text", "umps over the lazy", "=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", "Delta length (19) is different from source text length (18)"}, + {"Invalid URL escaping", "", "+%c3%xy", "invalid URL escape \"%xy\""}, + {"Invalid UTF-8 sequence", "", "+%c3xy", "invalid UTF-8 token: \"\\xc3xy\""}, + {"Invalid diff operation", "", "a", "Invalid diff operation in DiffFromDelta: a"}, + {"Invalid diff syntax", "", "-", "strconv.ParseInt: parsing \"\": invalid syntax"}, + {"Negative number in delta", "", "--1", "Negative number in DiffFromDelta: -1"}, + {"Empty case", "", "", ""}, + } { + diffs, err := dmp.DiffFromDelta(tc.Text, tc.Delta) + msg := fmt.Sprintf("Test case #%d, %s", i, tc.Name) + if tc.ErrorMessagePrefix == "" { + assert.Nil(t, err, msg) + assert.Nil(t, diffs, msg) + } else { + e := err.Error() + if strings.HasPrefix(e, tc.ErrorMessagePrefix) { + e = tc.ErrorMessagePrefix + } + assert.Nil(t, diffs, msg) + assert.Equal(t, tc.ErrorMessagePrefix, e, msg) + } + } + + // Convert a diff into delta string. + diffs := []Diff{ + Diff{DiffEqual, "jump"}, + Diff{DiffDelete, "s"}, + Diff{DiffInsert, "ed"}, + Diff{DiffEqual, " over "}, + Diff{DiffDelete, "the"}, + Diff{DiffInsert, "a"}, + Diff{DiffEqual, " lazy"}, + Diff{DiffInsert, "old dog"}, + } + text1 := dmp.DiffText1(diffs) + assert.Equal(t, "jumps over the lazy", text1) + + delta := dmp.DiffToDelta(diffs) + assert.Equal(t, "=4\t-1\t+ed\t=6\t-3\t+a\t=5\t+old dog", delta) + + // Convert delta string into a diff. + deltaDiffs, err := dmp.DiffFromDelta(text1, delta) + assert.Equal(t, diffs, deltaDiffs) + + // Test deltas with special characters. 
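+ // Inserted text in a delta is URL-encoded, so the control and non-ASCII characters below surface as %-escapes (e.g. "\u0682" becomes "%DA%82"), while spaces and a pool of safe punctuation pass through verbatim -- see the "unchanged characters" case further down. + // Rough round-trip sketch: delta := dmp.DiffToDelta(diffs); rebuilt, _ := dmp.DiffFromDelta(dmp.DiffText1(diffs), delta) // rebuilt matches diffs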
+ diffs = []Diff{ + Diff{DiffEqual, "\u0680 \x00 \t %"}, + Diff{DiffDelete, "\u0681 \x01 \n ^"}, + Diff{DiffInsert, "\u0682 \x02 \\ |"}, + } + text1 = dmp.DiffText1(diffs) + assert.Equal(t, "\u0680 \x00 \t %\u0681 \x01 \n ^", text1) + + // Lowercase, due to UrlEncode uses lower. + delta = dmp.DiffToDelta(diffs) + assert.Equal(t, "=7\t-7\t+%DA%82 %02 %5C %7C", delta) + + deltaDiffs, err = dmp.DiffFromDelta(text1, delta) + assert.Equal(t, diffs, deltaDiffs) + assert.Nil(t, err) + + // Verify pool of unchanged characters. + diffs = []Diff{ + Diff{DiffInsert, "A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # "}, + } + + delta = dmp.DiffToDelta(diffs) + assert.Equal(t, "+A-Z a-z 0-9 - _ . ! ~ * ' ( ) ; / ? : @ & = + $ , # ", delta, "Unchanged characters.") + + // Convert delta string into a diff. + deltaDiffs, err = dmp.DiffFromDelta("", delta) + assert.Equal(t, diffs, deltaDiffs) + assert.Nil(t, err) +} + +func TestDiffXIndex(t *testing.T) { + type TestCase struct { + Name string + + Diffs []Diff + Location int + + Expected int + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Translation on equality", []Diff{{DiffDelete, "a"}, {DiffInsert, "1234"}, {DiffEqual, "xyz"}}, 2, 5}, + {"Translation on deletion", []Diff{{DiffEqual, "a"}, {DiffDelete, "1234"}, {DiffEqual, "xyz"}}, 3, 1}, + } { + actual := dmp.DiffXIndex(tc.Diffs, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestDiffLevenshtein(t *testing.T) { + type TestCase struct { + Name string + + Diffs []Diff + + Expected int + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Levenshtein with trailing equality", []Diff{{DiffDelete, "abc"}, {DiffInsert, "1234"}, {DiffEqual, "xyz"}}, 4}, + {"Levenshtein with leading equality", []Diff{{DiffEqual, "xyz"}, {DiffDelete, "abc"}, {DiffInsert, "1234"}}, 4}, + {"Levenshtein with middle equality", []Diff{{DiffDelete, "abc"}, {DiffEqual, "xyz"}, {DiffInsert, "1234"}}, 7}, + } { + actual := dmp.DiffLevenshtein(tc.Diffs) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestDiffBisect(t *testing.T) { + type TestCase struct { + Name string + + Time time.Time + + Expected []Diff + } + + dmp := New() + + for i, tc := range []TestCase{ + { + Name: "normal", + Time: time.Date(9999, time.December, 31, 23, 59, 59, 59, time.UTC), + + Expected: []Diff{ + {DiffDelete, "c"}, + {DiffInsert, "m"}, + {DiffEqual, "a"}, + {DiffDelete, "t"}, + {DiffInsert, "p"}, + }, + }, + { + Name: "Negative deadlines count as having infinite time", + Time: time.Date(0001, time.January, 01, 00, 00, 00, 00, time.UTC), + + Expected: []Diff{ + {DiffDelete, "c"}, + {DiffInsert, "m"}, + {DiffEqual, "a"}, + {DiffDelete, "t"}, + {DiffInsert, "p"}, + }, + }, + { + Name: "Timeout", + Time: time.Now().Add(time.Nanosecond), + + Expected: []Diff{ + {DiffDelete, "cat"}, + {DiffInsert, "map"}, + }, + }, + } { + actual := dmp.DiffBisect("cat", "map", tc.Time) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + // Test for invalid UTF-8 sequences + assert.Equal(t, []Diff{ + Diff{DiffEqual, "��"}, + }, dmp.DiffBisect("\xe0\xe5", "\xe0\xe5", time.Now().Add(time.Minute))) +} + +func TestDiffMain(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + + Expected []Diff + } + + dmp := New() + + // Perform a trivial diff. 
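+ // DiffMain(text1, text2, checklines) is the package's main entry point; checklines only matters for large inputs, where true enables the line-mode speedup exercised in TestDiffMainWithCheckLines below. + // Minimal sketch: dmp.DiffMain("abc", "ab123c", false) yields {Equal "ab"}, {Insert "123"}, {Equal "c"} -- the third case in this table.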
+ for i, tc := range []TestCase{ + { + "", + "", + nil, + }, + { + "abc", + "abc", + []Diff{Diff{DiffEqual, "abc"}}, + }, + { + "abc", + "ab123c", + []Diff{Diff{DiffEqual, "ab"}, Diff{DiffInsert, "123"}, Diff{DiffEqual, "c"}}, + }, + { + "a123bc", + "abc", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffDelete, "123"}, Diff{DiffEqual, "bc"}}, + }, + { + "abc", + "a123b456c", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffInsert, "123"}, Diff{DiffEqual, "b"}, Diff{DiffInsert, "456"}, Diff{DiffEqual, "c"}}, + }, + { + "a123b456c", + "abc", + []Diff{Diff{DiffEqual, "a"}, Diff{DiffDelete, "123"}, Diff{DiffEqual, "b"}, Diff{DiffDelete, "456"}, Diff{DiffEqual, "c"}}, + }, + } { + actual := dmp.DiffMain(tc.Text1, tc.Text2, false) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + + // Perform a real diff and switch off the timeout. + dmp.DiffTimeout = 0 + + for i, tc := range []TestCase{ + { + "a", + "b", + []Diff{Diff{DiffDelete, "a"}, Diff{DiffInsert, "b"}}, + }, + { + "Apples are a fruit.", + "Bananas are also fruit.", + []Diff{ + Diff{DiffDelete, "Apple"}, + Diff{DiffInsert, "Banana"}, + Diff{DiffEqual, "s are a"}, + Diff{DiffInsert, "lso"}, + Diff{DiffEqual, " fruit."}, + }, + }, + { + "ax\t", + "\u0680x\u0000", + []Diff{ + Diff{DiffDelete, "a"}, + Diff{DiffInsert, "\u0680"}, + Diff{DiffEqual, "x"}, + Diff{DiffDelete, "\t"}, + Diff{DiffInsert, "\u0000"}, + }, + }, + { + "1ayb2", + "abxab", + []Diff{ + Diff{DiffDelete, "1"}, + Diff{DiffEqual, "a"}, + Diff{DiffDelete, "y"}, + Diff{DiffEqual, "b"}, + Diff{DiffDelete, "2"}, + Diff{DiffInsert, "xab"}, + }, + }, + { + "abcy", + "xaxcxabc", + []Diff{ + Diff{DiffInsert, "xaxcx"}, + Diff{DiffEqual, "abc"}, Diff{DiffDelete, "y"}, + }, + }, + { + "ABCDa=bcd=efghijklmnopqrsEFGHIJKLMNOefg", + "a-bcd-efghijklmnopqrs", + []Diff{ + Diff{DiffDelete, "ABCD"}, + Diff{DiffEqual, "a"}, + Diff{DiffDelete, "="}, + Diff{DiffInsert, "-"}, + Diff{DiffEqual, "bcd"}, + Diff{DiffDelete, "="}, + Diff{DiffInsert, "-"}, + Diff{DiffEqual, "efghijklmnopqrs"}, + Diff{DiffDelete, "EFGHIJKLMNOefg"}, + }, + }, + { + "a [[Pennsylvania]] and [[New", + " and [[Pennsylvania]]", + []Diff{ + Diff{DiffInsert, " "}, + Diff{DiffEqual, "a"}, + Diff{DiffInsert, "nd"}, + Diff{DiffEqual, " [[Pennsylvania]]"}, + Diff{DiffDelete, " and [[New"}, + }, + }, + } { + actual := dmp.DiffMain(tc.Text1, tc.Text2, false) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + + // Test for invalid UTF-8 sequences + assert.Equal(t, []Diff{ + Diff{DiffDelete, "��"}, + }, dmp.DiffMain("\xe0\xe5", "", false)) +} + +func TestDiffMainWithTimeout(t *testing.T) { + dmp := New() + dmp.DiffTimeout = 200 * time.Millisecond + + a := "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n" + b := "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n" + // Increase the text lengths by 1024 times to ensure a timeout. + for x := 0; x < 13; x++ { + a = a + a + b = b + b + } + + startTime := time.Now() + dmp.DiffMain(a, b, true) + endTime := time.Now() + + delta := endTime.Sub(startTime) + + // Test that we took at least the timeout period. + assert.True(t, delta >= dmp.DiffTimeout, fmt.Sprintf("%v !>= %v", delta, dmp.DiffTimeout)) + + // Test that we didn't take forever (be very forgiving). 
Theoretically this test could fail very occasionally if the OS task swaps or locks up for a second at the wrong moment. + assert.True(t, delta < (dmp.DiffTimeout*100), fmt.Sprintf("%v !< %v", delta, dmp.DiffTimeout*100)) +} + +func TestDiffMainWithCheckLines(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + } + + dmp := New() + dmp.DiffTimeout = 0 + + // Test cases must be at least 100 chars long to pass the cutoff. + for i, tc := range []TestCase{ + { + "1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n", + "abcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\nabcdefghij\n", + }, + { + "1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890", + "abcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghijabcdefghij", + }, + { + "1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n1234567890\n", + "abcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n1234567890\n1234567890\n1234567890\nabcdefghij\n", + }, + } { + resultWithoutCheckLines := dmp.DiffMain(tc.Text1, tc.Text2, false) + resultWithCheckLines := dmp.DiffMain(tc.Text1, tc.Text2, true) + + // TODO this fails for the third test case, why? + if i != 2 { + assert.Equal(t, resultWithoutCheckLines, resultWithCheckLines, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + assert.Equal(t, diffRebuildTexts(resultWithoutCheckLines), diffRebuildTexts(resultWithCheckLines), fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func BenchmarkDiffMain(bench *testing.B) { + s1 := "`Twas brillig, and the slithy toves\nDid gyre and gimble in the wabe:\nAll mimsy were the borogoves,\nAnd the mome raths outgrabe.\n" + s2 := "I am the very model of a modern major general,\nI've information vegetable, animal, and mineral,\nI know the kings of England, and I quote the fights historical,\nFrom Marathon to Waterloo, in order categorical.\n" + + // Increase the text lengths by 1024 times to ensure a timeout. + for x := 0; x < 10; x++ { + s1 = s1 + s1 + s2 = s2 + s2 + } + + dmp := New() + dmp.DiffTimeout = time.Second + + bench.ResetTimer() + + for i := 0; i < bench.N; i++ { + dmp.DiffMain(s1, s2, true) + } +} + +func BenchmarkDiffMainLarge(b *testing.B) { + s1, s2 := speedtestTexts() + + dmp := New() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + dmp.DiffMain(s1, s2, true) + } +} + +func BenchmarkDiffMainRunesLargeLines(b *testing.B) { + s1, s2 := speedtestTexts() + + dmp := New() + + b.ResetTimer() + + for i := 0; i < b.N; i++ { + text1, text2, linearray := dmp.DiffLinesToRunes(s1, s2) + + diffs := dmp.DiffMainRunes(text1, text2, false) + diffs = dmp.DiffCharsToLines(diffs, linearray) + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match_test.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match_test.go new file mode 100644 index 0000000000..f9abe60a7e --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match_test.go @@ -0,0 +1,174 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. 
+// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMatchAlphabet(t *testing.T) { + type TestCase struct { + Pattern string + + Expected map[byte]int + } + + dmp := New() + + for i, tc := range []TestCase{ + { + Pattern: "abc", + + Expected: map[byte]int{ + 'a': 4, + 'b': 2, + 'c': 1, + }, + }, + { + Pattern: "abcaba", + + Expected: map[byte]int{ + 'a': 37, + 'b': 18, + 'c': 8, + }, + }, + } { + actual := dmp.MatchAlphabet(tc.Pattern) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestMatchBitap(t *testing.T) { + type TestCase struct { + Name string + + Text string + Pattern string + Location int + + Expected int + } + + dmp := New() + dmp.MatchDistance = 100 + dmp.MatchThreshold = 0.5 + + for i, tc := range []TestCase{ + {"Exact match #1", "abcdefghijk", "fgh", 5, 5}, + {"Exact match #2", "abcdefghijk", "fgh", 0, 5}, + {"Fuzzy match #1", "abcdefghijk", "efxhi", 0, 4}, + {"Fuzzy match #2", "abcdefghijk", "cdefxyhijk", 5, 2}, + {"Fuzzy match #3", "abcdefghijk", "bxy", 1, -1}, + {"Overflow", "123456789xx0", "3456789x0", 2, 2}, + {"Before start match", "abcdef", "xxabc", 4, 0}, + {"Beyond end match", "abcdef", "defyy", 4, 3}, + {"Oversized pattern", "abcdef", "xabcdefy", 0, 0}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchThreshold = 0.4 + + for i, tc := range []TestCase{ + {"Threshold #1", "abcdefghijk", "efxyhi", 1, 4}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchThreshold = 0.3 + + for i, tc := range []TestCase{ + {"Threshold #2", "abcdefghijk", "efxyhi", 1, -1}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchThreshold = 0.0 + + for i, tc := range []TestCase{ + {"Threshold #3", "abcdefghijk", "bcdef", 1, 1}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchThreshold = 0.5 + + for i, tc := range []TestCase{ + {"Multiple select #1", "abcdexyzabcde", "abccde", 3, 0}, + {"Multiple select #2", "abcdexyzabcde", "abccde", 5, 8}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + // Strict location. + dmp.MatchDistance = 10 + + for i, tc := range []TestCase{ + {"Distance test #1", "abcdefghijklmnopqrstuvwxyz", "abcdefg", 24, -1}, + {"Distance test #2", "abcdefghijklmnopqrstuvwxyz", "abcdxxefg", 1, 0}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + // Loose location. 
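+ // MatchDistance trades locality for recall: a candidate's score decays with its distance from the requested location, and MatchThreshold caps how poor a score may be before the match is rejected. + // With the strict distance of 10 above, "abcdefg" found 24 characters from the requested location is rejected (-1); with the loose distance below it is accepted at index 0.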
+ dmp.MatchDistance = 1000 + + for i, tc := range []TestCase{ + {"Distance test #3", "abcdefghijklmnopqrstuvwxyz", "abcdefg", 24, 0}, + } { + actual := dmp.MatchBitap(tc.Text, tc.Pattern, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestMatchMain(t *testing.T) { + type TestCase struct { + Name string + + Text1 string + Text2 string + Location int + + Expected int + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Equality", "abcdef", "abcdef", 1000, 0}, + {"Null text", "", "abcdef", 1, -1}, + {"Null pattern", "abcdef", "", 3, 3}, + {"Exact match", "abcdef", "de", 3, 3}, + {"Beyond end match", "abcdef", "defy", 4, 3}, + {"Oversized pattern", "abcdef", "abcdefy", 0, 0}, + } { + actual := dmp.MatchMain(tc.Text1, tc.Text2, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchThreshold = 0.7 + + for i, tc := range []TestCase{ + {"Complex match", "I am the very model of a modern major general.", " that berry ", 5, 4}, + } { + actual := dmp.MatchMain(tc.Text1, tc.Text2, tc.Location) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch_test.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch_test.go new file mode 100644 index 0000000000..fa1763e7db --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch_test.go @@ -0,0 +1,339 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "fmt" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPatchString(t *testing.T) { + type TestCase struct { + Patch Patch + + Expected string + } + + for i, tc := range []TestCase{ + { + Patch: Patch{ + start1: 20, + start2: 21, + length1: 18, + length2: 17, + + diffs: []Diff{ + {DiffEqual, "jump"}, + {DiffDelete, "s"}, + {DiffInsert, "ed"}, + {DiffEqual, " over "}, + {DiffDelete, "the"}, + {DiffInsert, "a"}, + {DiffEqual, "\nlaz"}, + }, + }, + + Expected: "@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", + }, + } { + actual := tc.Patch.String() + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestPatchFromText(t *testing.T) { + type TestCase struct { + Patch string + + ErrorMessagePrefix string + } + + dmp := New() + + for i, tc := range []TestCase{ + {"", ""}, + {"@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n %0Alaz\n", ""}, + {"@@ -1 +1 @@\n-a\n+b\n", ""}, + {"@@ -1,3 +0,0 @@\n-abc\n", ""}, + {"@@ -0,0 +1,3 @@\n+abc\n", ""}, + {"@@ _0,0 +0,0 @@\n+abc\n", "Invalid patch string: @@ _0,0 +0,0 @@"}, + {"Bad\nPatch\n", "Invalid patch string"}, + } { + patches, err := dmp.PatchFromText(tc.Patch) + if tc.ErrorMessagePrefix == "" { + assert.Nil(t, err) + + if tc.Patch == "" { + assert.Equal(t, []Patch{}, patches, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } else { + assert.Equal(t, tc.Patch, patches[0].String(), fmt.Sprintf("Test case #%d, %#v", i, tc)) + } + } else { + e := err.Error() + if strings.HasPrefix(e, tc.ErrorMessagePrefix) { + e = tc.ErrorMessagePrefix + } + assert.Equal(t, tc.ErrorMessagePrefix, e) + } + } + + diffs := []Diff{ + {DiffDelete, "`1234567890-=[]\\;',./"}, + {DiffInsert, "~!@#$%^&*()_+{}|:\"<>?"}, + } + + patches, err := dmp.PatchFromText("@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n") + assert.Len(t, patches, 1) + assert.Equal(t, diffs, + patches[0].diffs, + ) + assert.Nil(t, err) +} + +func TestPatchToText(t *testing.T) { + type TestCase struct { + Patch string + } + + dmp := New() + + for i, tc := range []TestCase{ + {"@@ -21,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"}, + {"@@ -1,9 +1,9 @@\n-f\n+F\n oo+fooba\n@@ -7,9 +7,9 @@\n obar\n-,\n+.\n tes\n"}, + } { + patches, err := dmp.PatchFromText(tc.Patch) + assert.Nil(t, err) + + actual := dmp.PatchToText(patches) + assert.Equal(t, tc.Patch, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestPatchAddContext(t *testing.T) { + type TestCase struct { + Name string + + Patch string + Text string + + Expected string + } + + dmp := New() + dmp.PatchMargin = 4 + + for i, tc := range []TestCase{ + {"Simple case", "@@ -21,4 +21,10 @@\n-jump\n+somersault\n", "The quick brown fox jumps over the lazy dog.", "@@ -17,12 +17,18 @@\n fox \n-jump\n+somersault\n s ov\n"}, + {"Not enough trailing context", "@@ -21,4 +21,10 @@\n-jump\n+somersault\n", "The quick brown fox jumps.", "@@ -17,10 +17,16 @@\n fox \n-jump\n+somersault\n s.\n"}, + {"Not enough leading context", "@@ -3 +3,2 @@\n-e\n+at\n", "The quick brown fox jumps.", "@@ -1,7 +1,8 @@\n Th\n-e\n+at\n qui\n"}, + {"Ambiguity", "@@ -3 +3,2 @@\n-e\n+at\n", "The quick brown fox jumps. The quick brown fox crashes.", "@@ -1,27 +1,28 @@\n Th\n-e\n+at\n quick brown fox jumps. 
\n"}, + } { + patches, err := dmp.PatchFromText(tc.Patch) + assert.Nil(t, err) + + actual := dmp.PatchAddContext(patches[0], tc.Text) + assert.Equal(t, tc.Expected, actual.String(), fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestPatchMakeAndPatchToText(t *testing.T) { + type TestCase struct { + Name string + + Input1 interface{} + Input2 interface{} + Input3 interface{} + + Expected string + } + + dmp := New() + + text1 := "The quick brown fox jumps over the lazy dog." + text2 := "That quick brown fox jumped over a lazy dog." + + for i, tc := range []TestCase{ + {"Null case", "", "", nil, ""}, + {"Text2+Text1 inputs", text2, text1, nil, "@@ -1,8 +1,7 @@\n Th\n-at\n+e\n qui\n@@ -21,17 +21,18 @@\n jump\n-ed\n+s\n over \n-a\n+the\n laz\n"}, + {"Text1+Text2 inputs", text1, text2, nil, "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"}, + {"Diff input", dmp.DiffMain(text1, text2, false), nil, nil, "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"}, + {"Text1+Diff inputs", text1, dmp.DiffMain(text1, text2, false), nil, "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"}, + {"Text1+Text2+Diff inputs (deprecated)", text1, text2, dmp.DiffMain(text1, text2, false), "@@ -1,11 +1,12 @@\n Th\n-e\n+at\n quick b\n@@ -22,18 +22,17 @@\n jump\n-s\n+ed\n over \n-the\n+a\n laz\n"}, + {"Character encoding", "`1234567890-=[]\\;',./", "~!@#$%^&*()_+{}|:\"<>?", nil, "@@ -1,21 +1,21 @@\n-%601234567890-=%5B%5D%5C;',./\n+~!@#$%25%5E&*()_+%7B%7D%7C:%22%3C%3E?\n"}, + {"Long string with repeats", strings.Repeat("abcdef", 100), strings.Repeat("abcdef", 100) + "123", nil, "@@ -573,28 +573,31 @@\n cdefabcdefabcdefabcdefabcdef\n+123\n"}, + {"Corner case of #31 fixed by #32", "2016-09-01T03:07:14.807830741Z", "2016-09-01T03:07:15.154800781Z", nil, "@@ -15,16 +15,16 @@\n 07:1\n+5.15\n 4\n-.\n 80\n+0\n 78\n-3074\n 1Z\n"}, + } { + var patches []Patch + if tc.Input3 != nil { + patches = dmp.PatchMake(tc.Input1, tc.Input2, tc.Input3) + } else if tc.Input2 != nil { + patches = dmp.PatchMake(tc.Input1, tc.Input2) + } else if ps, ok := tc.Input1.([]Patch); ok { + patches = ps + } else { + patches = dmp.PatchMake(tc.Input1) + } + + actual := dmp.PatchToText(patches) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + // Corner case of #28 wrong patch with timeout of 0 + dmp.DiffTimeout = 0 + + text1 = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ut risus et enim consectetur convallis a non ipsum. Sed nec nibh cursus, interdum libero vel." + text2 = "Lorem a ipsum dolor sit amet, consectetur adipiscing elit. Vivamus ut risus et enim consectetur convallis a non ipsum. Sed nec nibh cursus, interdum liberovel." 
+ + diffs := dmp.DiffMain(text1, text2, true) + // Additional check that the diff texts are equal to the originals even if we are using DiffMain with checklines=true #29 + assert.Equal(t, text1, dmp.DiffText1(diffs)) + assert.Equal(t, text2, dmp.DiffText2(diffs)) + + patches := dmp.PatchMake(text1, diffs) + + actual := dmp.PatchToText(patches) + assert.Equal(t, "@@ -1,14 +1,16 @@\n Lorem \n+a \n ipsum do\n@@ -148,13 +148,12 @@\n m libero\n- \n vel.\n", actual) + + // Check that empty Patch array is returned for no parameter call + patches = dmp.PatchMake() + assert.Equal(t, []Patch{}, patches) +} + +func TestPatchSplitMax(t *testing.T) { + type TestCase struct { + Text1 string + Text2 string + + Expected string + } + + dmp := New() + + for i, tc := range []TestCase{ + {"abcdefghijklmnopqrstuvwxyz01234567890", "XabXcdXefXghXijXklXmnXopXqrXstXuvXwxXyzX01X23X45X67X89X0", "@@ -1,32 +1,46 @@\n+X\n ab\n+X\n cd\n+X\n ef\n+X\n gh\n+X\n ij\n+X\n kl\n+X\n mn\n+X\n op\n+X\n qr\n+X\n st\n+X\n uv\n+X\n wx\n+X\n yz\n+X\n 012345\n@@ -25,13 +39,18 @@\n zX01\n+X\n 23\n+X\n 45\n+X\n 67\n+X\n 89\n+X\n 0\n"}, + {"abcdef1234567890123456789012345678901234567890123456789012345678901234567890uvwxyz", "abcdefuvwxyz", "@@ -3,78 +3,8 @@\n cdef\n-1234567890123456789012345678901234567890123456789012345678901234567890\n uvwx\n"}, + {"1234567890123456789012345678901234567890123456789012345678901234567890", "abc", "@@ -1,32 +1,4 @@\n-1234567890123456789012345678\n 9012\n@@ -29,32 +1,4 @@\n-9012345678901234567890123456\n 7890\n@@ -57,14 +1,3 @@\n-78901234567890\n+abc\n"}, + {"abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1 abcdefghij , h : 0 , t : 1", "abcdefghij , h : 1 , t : 1 abcdefghij , h : 1 , t : 1 abcdefghij , h : 0 , t : 1", "@@ -2,32 +2,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n@@ -29,32 +29,32 @@\n bcdefghij , h : \n-0\n+1\n , t : 1 abcdef\n"}, + } { + patches := dmp.PatchMake(tc.Text1, tc.Text2) + patches = dmp.PatchSplitMax(patches) + + actual := dmp.PatchToText(patches) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestPatchAddPadding(t *testing.T) { + type TestCase struct { + Name string + + Text1 string + Text2 string + + Expected string + ExpectedWithPadding string + } + + dmp := New() + + for i, tc := range []TestCase{ + {"Both edges full", "", "test", "@@ -0,0 +1,4 @@\n+test\n", "@@ -1,8 +1,12 @@\n %01%02%03%04\n+test\n %01%02%03%04\n"}, + {"Both edges partial", "XY", "XtestY", "@@ -1,2 +1,6 @@\n X\n+test\n Y\n", "@@ -2,8 +2,12 @@\n %02%03%04X\n+test\n Y%01%02%03\n"}, + {"Both edges none", "XXXXYYYY", "XXXXtestYYYY", "@@ -1,8 +1,12 @@\n XXXX\n+test\n YYYY\n", "@@ -5,8 +5,12 @@\n XXXX\n+test\n YYYY\n"}, + } { + patches := dmp.PatchMake(tc.Text1, tc.Text2) + + actual := dmp.PatchToText(patches) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + + dmp.PatchAddPadding(patches) + + actualWithPadding := dmp.PatchToText(patches) + assert.Equal(t, tc.ExpectedWithPadding, actualWithPadding, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} + +func TestPatchApply(t *testing.T) { + type TestCase struct { + Name string + + Text1 string + Text2 string + TextBase string + + Expected string + ExpectedApplies []bool + } + + dmp := New() + dmp.MatchDistance = 1000 + dmp.MatchThreshold = 0.5 + dmp.PatchDeleteThreshold = 0.5 + + for i, tc := range []TestCase{ + {"Null case", "", "", "Hello world.", "Hello world.", []bool{}}, + {"Exact match", "The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped 
over a lazy dog.", "The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.", []bool{true, true}}, + {"Partial match", "The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.", "The quick red rabbit jumps over the tired tiger.", "That quick red rabbit jumped over a tired tiger.", []bool{true, true}}, + {"Failed match", "The quick brown fox jumps over the lazy dog.", "That quick brown fox jumped over a lazy dog.", "I am the very model of a modern major general.", "I am the very model of a modern major general.", []bool{false, false}}, + {"Big delete, small Diff", "x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy", "x123456789012345678901234567890-----++++++++++-----123456789012345678901234567890y", "xabcy", []bool{true, true}}, + {"Big delete, big Diff 1", "x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy", "x12345678901234567890---------------++++++++++---------------12345678901234567890y", "xabc12345678901234567890---------------++++++++++---------------12345678901234567890y", []bool{false, true}}, + } { + patches := dmp.PatchMake(tc.Text1, tc.Text2) + + actual, actualApplies := dmp.PatchApply(patches, tc.TextBase) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + assert.Equal(t, tc.ExpectedApplies, actualApplies, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.PatchDeleteThreshold = 0.6 + + for i, tc := range []TestCase{ + {"Big delete, big Diff 2", "x1234567890123456789012345678901234567890123456789012345678901234567890y", "xabcy", "x12345678901234567890---------------++++++++++---------------12345678901234567890y", "xabcy", []bool{true, true}}, + } { + patches := dmp.PatchMake(tc.Text1, tc.Text2) + + actual, actualApplies := dmp.PatchApply(patches, tc.TextBase) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + assert.Equal(t, tc.ExpectedApplies, actualApplies, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchDistance = 0 + dmp.MatchThreshold = 0.0 + dmp.PatchDeleteThreshold = 0.5 + + for i, tc := range []TestCase{ + {"Compensate for failed patch", "abcdefghijklmnopqrstuvwxyz--------------------1234567890", "abcXXXXXXXXXXdefghijklmnopqrstuvwxyz--------------------1234567YYYYYYYYYY890", "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567890", "ABCDEFGHIJKLMNOPQRSTUVWXYZ--------------------1234567YYYYYYYYYY890", []bool{false, true}}, + } { + patches := dmp.PatchMake(tc.Text1, tc.Text2) + + actual, actualApplies := dmp.PatchApply(patches, tc.TextBase) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + assert.Equal(t, tc.ExpectedApplies, actualApplies, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } + + dmp.MatchThreshold = 0.5 + dmp.MatchDistance = 1000 + + for i, tc := range []TestCase{ + {"No side effects", "", "test", "", "test", []bool{true}}, + {"No side effects with major delete", "The quick brown fox jumps over the lazy dog.", "Woof", "The quick brown fox jumps over the lazy dog.", "Woof", []bool{true, true}}, + {"Edge exact match", "", "test", "", "test", []bool{true}}, + {"Near edge exact match", "XY", "XtestY", "XY", "XtestY", []bool{true}}, + {"Edge partial match", "y", "y123", "x", "x123", []bool{true}}, + } { + patches := dmp.PatchMake(tc.Text1, tc.Text2) + + actual, actualApplies := dmp.PatchApply(patches, tc.TextBase) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %s", i, 
tc.Name)) + assert.Equal(t, tc.ExpectedApplies, actualApplies, fmt.Sprintf("Test case #%d, %s", i, tc.Name)) + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil_test.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil_test.go new file mode 100644 index 0000000000..ab2bc103fb --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil_test.go @@ -0,0 +1,116 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRunesIndexOf(t *testing.T) { + type TestCase struct { + Pattern string + Start int + + Expected int + } + + for i, tc := range []TestCase{ + {"abc", 0, 0}, + {"cde", 0, 2}, + {"e", 0, 4}, + {"cdef", 0, -1}, + {"abcdef", 0, -1}, + {"abc", 2, -1}, + {"cde", 2, 2}, + {"e", 2, 4}, + {"cdef", 2, -1}, + {"abcdef", 2, -1}, + {"e", 6, -1}, + } { + actual := runesIndexOf([]rune("abcde"), []rune(tc.Pattern), tc.Start) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestIndexOf(t *testing.T) { + type TestCase struct { + String string + Pattern string + Position int + + Expected int + } + + for i, tc := range []TestCase{ + {"hi world", "world", -1, 3}, + {"hi world", "world", 0, 3}, + {"hi world", "world", 1, 3}, + {"hi world", "world", 2, 3}, + {"hi world", "world", 3, 3}, + {"hi world", "world", 4, -1}, + {"abbc", "b", -1, 1}, + {"abbc", "b", 0, 1}, + {"abbc", "b", 1, 1}, + {"abbc", "b", 2, 2}, + {"abbc", "b", 3, -1}, + {"abbc", "b", 4, -1}, + // The greek letter beta is the two-byte sequence of "\u03b2". + {"a\u03b2\u03b2c", "\u03b2", -1, 1}, + {"a\u03b2\u03b2c", "\u03b2", 0, 1}, + {"a\u03b2\u03b2c", "\u03b2", 1, 1}, + {"a\u03b2\u03b2c", "\u03b2", 3, 3}, + {"a\u03b2\u03b2c", "\u03b2", 5, -1}, + {"a\u03b2\u03b2c", "\u03b2", 6, -1}, + } { + actual := indexOf(tc.String, tc.Pattern, tc.Position) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} + +func TestLastIndexOf(t *testing.T) { + type TestCase struct { + String string + Pattern string + Position int + + Expected int + } + + for i, tc := range []TestCase{ + {"hi world", "world", -1, -1}, + {"hi world", "world", 0, -1}, + {"hi world", "world", 1, -1}, + {"hi world", "world", 2, -1}, + {"hi world", "world", 3, -1}, + {"hi world", "world", 4, -1}, + {"hi world", "world", 5, -1}, + {"hi world", "world", 6, -1}, + {"hi world", "world", 7, 3}, + {"hi world", "world", 8, 3}, + {"abbc", "b", -1, -1}, + {"abbc", "b", 0, -1}, + {"abbc", "b", 1, 1}, + {"abbc", "b", 2, 2}, + {"abbc", "b", 3, 2}, + {"abbc", "b", 4, 2}, + // The greek letter beta is the two-byte sequence of "\u03b2". 
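+ // As in TestIndexOf above, positions are byte offsets rather than rune indices: each β occupies two bytes, so the second β in "a\u03b2\u03b2c" starts at byte 3, which is what the expected values below encode.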
+ {"a\u03b2\u03b2c", "\u03b2", -1, -1}, + {"a\u03b2\u03b2c", "\u03b2", 0, -1}, + {"a\u03b2\u03b2c", "\u03b2", 1, 1}, + {"a\u03b2\u03b2c", "\u03b2", 3, 3}, + {"a\u03b2\u03b2c", "\u03b2", 5, 3}, + {"a\u03b2\u03b2c", "\u03b2", 6, 3}, + } { + actual := lastIndexOf(tc.String, tc.Pattern, tc.Position) + assert.Equal(t, tc.Expected, actual, fmt.Sprintf("Test case #%d, %#v", i, tc)) + } +} diff --git a/vendor/github.com/smartystreets/assertions/.gitignore b/vendor/github.com/smartystreets/assertions/.gitignore new file mode 100644 index 0000000000..6ad551742d --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/.gitignore @@ -0,0 +1,3 @@ +.DS_Store +Thumbs.db +/.idea diff --git a/vendor/github.com/smartystreets/assertions/.travis.yml b/vendor/github.com/smartystreets/assertions/.travis.yml new file mode 100644 index 0000000000..44217c9733 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + +install: + - go get -t ./... + +script: go test -v + +sudo: false diff --git a/vendor/github.com/smartystreets/assertions/collections_test.go b/vendor/github.com/smartystreets/assertions/collections_test.go new file mode 100644 index 0000000000..9433ef24bd --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/collections_test.go @@ -0,0 +1,157 @@ +package assertions + +import ( + "fmt" + "testing" + "time" +) + +func TestShouldContainKey(t *testing.T) { + fail(t, so(map[int]int{}, ShouldContainKey), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(map[int]int{}, ShouldContainKey, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(Thing1{}, ShouldContainKey, 1), "You must provide a valid map type (was assertions.Thing1)!") + fail(t, so(nil, ShouldContainKey, 1), "You must provide a valid map type (was )!") + fail(t, so(map[int]int{1: 41}, ShouldContainKey, 2), "Expected the map[int]int to contain the key: [2] (but it didn't)!") + + pass(t, so(map[int]int{1: 41}, ShouldContainKey, 1)) + pass(t, so(map[int]int{1: 41, 2: 42, 3: 43}, ShouldContainKey, 2)) +} + +func TestShouldNotContainKey(t *testing.T) { + fail(t, so(map[int]int{}, ShouldNotContainKey), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(map[int]int{}, ShouldNotContainKey, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(Thing1{}, ShouldNotContainKey, 1), "You must provide a valid map type (was assertions.Thing1)!") + fail(t, so(nil, ShouldNotContainKey, 1), "You must provide a valid map type (was )!") + fail(t, so(map[int]int{1: 41}, ShouldNotContainKey, 1), "Expected the map[int]int NOT to contain the key: [1] (but it did)!") + pass(t, so(map[int]int{1: 41}, ShouldNotContainKey, 2)) +} + +func TestShouldContain(t *testing.T) { + fail(t, so([]int{}, ShouldContain), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so([]int{}, ShouldContain, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(Thing1{}, ShouldContain, 1), "You must provide a valid container (was assertions.Thing1)!") + fail(t, so(nil, ShouldContain, 1), "You must provide a valid container (was )!") + fail(t, so([]int{1}, ShouldContain, 2), "Expected the container ([]int) to contain: '2' (but it didn't)!") + + pass(t, so([]int{1}, ShouldContain, 1)) + pass(t, so([]int{1, 2, 3}, ShouldContain, 2)) +} + +func TestShouldNotContain(t 
*testing.T) { + fail(t, so([]int{}, ShouldNotContain), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so([]int{}, ShouldNotContain, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(Thing1{}, ShouldNotContain, 1), "You must provide a valid container (was assertions.Thing1)!") + fail(t, so(nil, ShouldNotContain, 1), "You must provide a valid container (was )!") + + fail(t, so([]int{1}, ShouldNotContain, 1), "Expected the container ([]int) NOT to contain: '1' (but it did)!") + fail(t, so([]int{1, 2, 3}, ShouldNotContain, 2), "Expected the container ([]int) NOT to contain: '2' (but it did)!") + + pass(t, so([]int{1}, ShouldNotContain, 2)) +} + +func TestShouldBeIn(t *testing.T) { + fail(t, so(4, ShouldBeIn), needNonEmptyCollection) + + container := []int{1, 2, 3, 4} + pass(t, so(4, ShouldBeIn, container)) + pass(t, so(4, ShouldBeIn, 1, 2, 3, 4)) + + fail(t, so(4, ShouldBeIn, 1, 2, 3), "Expected '4' to be in the container ([]interface {}), but it wasn't!") + fail(t, so(4, ShouldBeIn, []int{1, 2, 3}), "Expected '4' to be in the container ([]int), but it wasn't!") +} + +func TestShouldNotBeIn(t *testing.T) { + fail(t, so(4, ShouldNotBeIn), needNonEmptyCollection) + + container := []int{1, 2, 3, 4} + pass(t, so(42, ShouldNotBeIn, container)) + pass(t, so(42, ShouldNotBeIn, 1, 2, 3, 4)) + + fail(t, so(2, ShouldNotBeIn, 1, 2, 3), "Expected '2' NOT to be in the container ([]interface {}), but it was!") + fail(t, so(2, ShouldNotBeIn, []int{1, 2, 3}), "Expected '2' NOT to be in the container ([]int), but it was!") +} + +func TestShouldBeEmpty(t *testing.T) { + fail(t, so(1, ShouldBeEmpty, 2, 3), "This assertion requires exactly 0 comparison values (you provided 2).") + + pass(t, so([]int{}, ShouldBeEmpty)) // empty slice + pass(t, so([]interface{}{}, ShouldBeEmpty)) // empty slice + pass(t, so(map[string]int{}, ShouldBeEmpty)) // empty map + pass(t, so("", ShouldBeEmpty)) // empty string + pass(t, so(&[]int{}, ShouldBeEmpty)) // pointer to empty slice + pass(t, so(&[0]int{}, ShouldBeEmpty)) // pointer to empty array + pass(t, so(nil, ShouldBeEmpty)) // nil + pass(t, so(make(chan string), ShouldBeEmpty)) // empty channel + + fail(t, so([]int{1}, ShouldBeEmpty), "Expected [1] to be empty (but it wasn't)!") // non-empty slice + fail(t, so([]interface{}{1}, ShouldBeEmpty), "Expected [1] to be empty (but it wasn't)!") // non-empty slice + fail(t, so(map[string]int{"hi": 0}, ShouldBeEmpty), "Expected map[hi:0] to be empty (but it wasn't)!") // non-empty map + fail(t, so("hi", ShouldBeEmpty), "Expected hi to be empty (but it wasn't)!") // non-empty string + fail(t, so(&[]int{1}, ShouldBeEmpty), "Expected &[1] to be empty (but it wasn't)!") // pointer to non-empty slice + fail(t, so(&[1]int{1}, ShouldBeEmpty), "Expected &[1] to be empty (but it wasn't)!") // pointer to non-empty array + c := make(chan int, 1) // non-empty channel + go func() { c <- 1 }() + time.Sleep(time.Millisecond) + fail(t, so(c, ShouldBeEmpty), fmt.Sprintf("Expected %+v to be empty (but it wasn't)!", c)) +} + +func TestShouldNotBeEmpty(t *testing.T) { + fail(t, so(1, ShouldNotBeEmpty, 2, 3), "This assertion requires exactly 0 comparison values (you provided 2).") + + fail(t, so([]int{}, ShouldNotBeEmpty), "Expected [] to NOT be empty (but it was)!") // empty slice + fail(t, so([]interface{}{}, ShouldNotBeEmpty), "Expected [] to NOT be empty (but it was)!") // empty slice + fail(t, so(map[string]int{}, ShouldNotBeEmpty), "Expected map[] to NOT be empty 
(but it was)!") // empty map + fail(t, so("", ShouldNotBeEmpty), "Expected to NOT be empty (but it was)!") // empty string + fail(t, so(&[]int{}, ShouldNotBeEmpty), "Expected &[] to NOT be empty (but it was)!") // pointer to empty slice + fail(t, so(&[0]int{}, ShouldNotBeEmpty), "Expected &[] to NOT be empty (but it was)!") // pointer to empty array + fail(t, so(nil, ShouldNotBeEmpty), "Expected to NOT be empty (but it was)!") // nil + c := make(chan int, 0) // non-empty channel + fail(t, so(c, ShouldNotBeEmpty), fmt.Sprintf("Expected %+v to NOT be empty (but it was)!", c)) // empty channel + + pass(t, so([]int{1}, ShouldNotBeEmpty)) // non-empty slice + pass(t, so([]interface{}{1}, ShouldNotBeEmpty)) // non-empty slice + pass(t, so(map[string]int{"hi": 0}, ShouldNotBeEmpty)) // non-empty map + pass(t, so("hi", ShouldNotBeEmpty)) // non-empty string + pass(t, so(&[]int{1}, ShouldNotBeEmpty)) // pointer to non-empty slice + pass(t, so(&[1]int{1}, ShouldNotBeEmpty)) // pointer to non-empty array + c = make(chan int, 1) + go func() { c <- 1 }() + time.Sleep(time.Millisecond) + pass(t, so(c, ShouldNotBeEmpty)) +} + +func TestShouldHaveLength(t *testing.T) { + fail(t, so(1, ShouldHaveLength, 2), "You must provide a valid container (was int)!") + fail(t, so(nil, ShouldHaveLength, 1), "You must provide a valid container (was )!") + fail(t, so("hi", ShouldHaveLength, float64(1.0)), "You must provide a valid integer (was float64)!") + fail(t, so([]string{}, ShouldHaveLength), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so([]string{}, ShouldHaveLength, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).") + fail(t, so([]string{}, ShouldHaveLength, -10), "You must provide a valid positive integer (was -10)!") + + fail(t, so([]int{}, ShouldHaveLength, 1), "Expected [] to have length equal to '1', but it wasn't!") // empty slice + fail(t, so([]interface{}{}, ShouldHaveLength, 1), "Expected [] to have length equal to '1', but it wasn't!") // empty slice + fail(t, so(map[string]int{}, ShouldHaveLength, 1), "Expected map[] to have length equal to '1', but it wasn't!") // empty map + fail(t, so("", ShouldHaveLength, 1), "Expected to have length equal to '1', but it wasn't!") // empty string + fail(t, so(&[]int{}, ShouldHaveLength, 1), "Expected &[] to have length equal to '1', but it wasn't!") // pointer to empty slice + fail(t, so(&[0]int{}, ShouldHaveLength, 1), "Expected &[] to have length equal to '1', but it wasn't!") // pointer to empty array + c := make(chan int, 0) // non-empty channel + fail(t, so(c, ShouldHaveLength, 1), fmt.Sprintf("Expected %+v to have length equal to '1', but it wasn't!", c)) + c = make(chan int) // empty channel + fail(t, so(c, ShouldHaveLength, 1), fmt.Sprintf("Expected %+v to have length equal to '1', but it wasn't!", c)) + + pass(t, so([]int{1}, ShouldHaveLength, 1)) // non-empty slice + pass(t, so([]interface{}{1}, ShouldHaveLength, 1)) // non-empty slice + pass(t, so(map[string]int{"hi": 0}, ShouldHaveLength, 1)) // non-empty map + pass(t, so("hi", ShouldHaveLength, 2)) // non-empty string + pass(t, so(&[]int{1}, ShouldHaveLength, 1)) // pointer to non-empty slice + pass(t, so(&[1]int{1}, ShouldHaveLength, 1)) // pointer to non-empty array + c = make(chan int, 1) + go func() { c <- 1 }() + time.Sleep(time.Millisecond) + pass(t, so(c, ShouldHaveLength, 1)) + +} diff --git a/vendor/github.com/smartystreets/assertions/doc_test.go b/vendor/github.com/smartystreets/assertions/doc_test.go new file mode 100644 
index 0000000000..041faaffcb --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/doc_test.go @@ -0,0 +1,57 @@ +package assertions + +import ( + "bytes" + "fmt" + "testing" +) + +func TestPassingAssertion(t *testing.T) { + fake := &FakeT{buffer: new(bytes.Buffer)} + assertion := New(fake) + passed := assertion.So(1, ShouldEqual, 1) + + if !passed { + t.Error("Assertion failed when it should have passed.") + } + if fake.buffer.Len() > 0 { + t.Error("Unexpected error message was printed.") + } +} + +func TestFailingAssertion(t *testing.T) { + fake := &FakeT{buffer: new(bytes.Buffer)} + assertion := New(fake) + passed := assertion.So(1, ShouldEqual, 2) + + if passed { + t.Error("Assertion passed when it should have failed.") + } + if fake.buffer.Len() == 0 { + t.Error("Expected error message not printed.") + } +} + +func TestFailingGroupsOfAssertions(t *testing.T) { + fake := &FakeT{buffer: new(bytes.Buffer)} + assertion1 := New(fake) + assertion2 := New(fake) + + assertion1.So(1, ShouldEqual, 2) // fail + assertion2.So(1, ShouldEqual, 1) // pass + + if !assertion1.Failed() { + t.Error("Expected the first assertion to have been marked as failed.") + } + if assertion2.Failed() { + t.Error("Expected the second assertion to NOT have been marked as failed.") + } +} + +type FakeT struct { + buffer *bytes.Buffer +} + +func (this *FakeT) Error(args ...interface{}) { + fmt.Fprint(this.buffer, args...) +} diff --git a/vendor/github.com/smartystreets/assertions/equality_test.go b/vendor/github.com/smartystreets/assertions/equality_test.go new file mode 100644 index 0000000000..5050e4b161 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/equality_test.go @@ -0,0 +1,269 @@ +package assertions + +import ( + "fmt" + "reflect" + "testing" +) + +func TestShouldEqual(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so(1, ShouldEqual), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldEqual, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).") + fail(t, so(1, ShouldEqual, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + pass(t, so(1, ShouldEqual, 1)) + fail(t, so(1, ShouldEqual, 2), "2|1|Expected: '2' Actual: '1' (Should be equal)") + fail(t, so(1, ShouldEqual, "1"), "1|1|Expected: '1' (string) Actual: '1' (int) (Should be equal, type mismatch)") + + pass(t, so(true, ShouldEqual, true)) + fail(t, so(true, ShouldEqual, false), "false|true|Expected: 'false' Actual: 'true' (Should be equal)") + + pass(t, so("hi", ShouldEqual, "hi")) + fail(t, so("hi", ShouldEqual, "bye"), "bye|hi|Expected: 'bye' Actual: 'hi' (Should be equal)") + + pass(t, so(42, ShouldEqual, uint(42))) + + fail(t, so(Thing1{"hi"}, ShouldEqual, Thing1{}), "{}|{hi}|Expected: '{}' Actual: '{hi}' (Should be equal)") + fail(t, so(Thing1{"hi"}, ShouldEqual, Thing1{"hi"}), "{hi}|{hi}|Expected: '{hi}' Actual: '{hi}' (Should be equal)") + fail(t, so(&Thing1{"hi"}, ShouldEqual, &Thing1{"hi"}), "&{hi}|&{hi}|Expected: '&{hi}' Actual: '&{hi}' (Should be equal)") + + fail(t, so(Thing1{}, ShouldEqual, Thing2{}), "{}|{}|Expected: '{}' Actual: '{}' (Should be equal)") +} + +func TestShouldNotEqual(t *testing.T) { + fail(t, so(1, ShouldNotEqual), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldNotEqual, 1, 2), "This assertion requires exactly 1 comparison values (you provided 2).") + fail(t, so(1, ShouldNotEqual, 1, 2, 3), "This assertion requires exactly 1 comparison 
values (you provided 3).") + + pass(t, so(1, ShouldNotEqual, 2)) + pass(t, so(1, ShouldNotEqual, "1")) + fail(t, so(1, ShouldNotEqual, 1), "Expected '1' to NOT equal '1' (but it did)!") + + pass(t, so(true, ShouldNotEqual, false)) + fail(t, so(true, ShouldNotEqual, true), "Expected 'true' to NOT equal 'true' (but it did)!") + + pass(t, so("hi", ShouldNotEqual, "bye")) + fail(t, so("hi", ShouldNotEqual, "hi"), "Expected 'hi' to NOT equal 'hi' (but it did)!") + + pass(t, so(&Thing1{"hi"}, ShouldNotEqual, &Thing1{"hi"})) + pass(t, so(Thing1{"hi"}, ShouldNotEqual, Thing1{"hi"})) + pass(t, so(Thing1{}, ShouldNotEqual, Thing1{})) + pass(t, so(Thing1{}, ShouldNotEqual, Thing2{})) +} + +func TestShouldAlmostEqual(t *testing.T) { + fail(t, so(1, ShouldAlmostEqual), "This assertion requires exactly one comparison value and an optional delta (you provided neither)") + fail(t, so(1, ShouldAlmostEqual, 1, 2, 3), "This assertion requires exactly one comparison value and an optional delta (you provided more values)") + + // with the default delta + pass(t, so(1, ShouldAlmostEqual, .99999999999999)) + pass(t, so(1.3612499999999996, ShouldAlmostEqual, 1.36125)) + pass(t, so(0.7285312499999999, ShouldAlmostEqual, 0.72853125)) + fail(t, so(1, ShouldAlmostEqual, .99), "Expected '1' to almost equal '0.99' (but it didn't)!") + + // with a different delta + pass(t, so(100.0, ShouldAlmostEqual, 110.0, 10.0)) + fail(t, so(100.0, ShouldAlmostEqual, 111.0, 10.5), "Expected '100' to almost equal '111' (but it didn't)!") + + // ints should work + pass(t, so(100, ShouldAlmostEqual, 100.0)) + fail(t, so(100, ShouldAlmostEqual, 99.0), "Expected '100' to almost equal '99' (but it didn't)!") + + // float32 should work + pass(t, so(float64(100.0), ShouldAlmostEqual, float32(100.0))) + fail(t, so(float32(100.0), ShouldAlmostEqual, 99.0, float32(0.1)), "Expected '100' to almost equal '99' (but it didn't)!") +} + +func TestShouldNotAlmostEqual(t *testing.T) { + fail(t, so(1, ShouldNotAlmostEqual), "This assertion requires exactly one comparison value and an optional delta (you provided neither)") + fail(t, so(1, ShouldNotAlmostEqual, 1, 2, 3), "This assertion requires exactly one comparison value and an optional delta (you provided more values)") + + // with the default delta + fail(t, so(1, ShouldNotAlmostEqual, .99999999999999), "Expected '1' to NOT almost equal '0.99999999999999' (but it did)!") + fail(t, so(1.3612499999999996, ShouldNotAlmostEqual, 1.36125), "Expected '1.3612499999999996' to NOT almost equal '1.36125' (but it did)!") + pass(t, so(1, ShouldNotAlmostEqual, .99)) + + // with a different delta + fail(t, so(100.0, ShouldNotAlmostEqual, 110.0, 10.0), "Expected '100' to NOT almost equal '110' (but it did)!") + pass(t, so(100.0, ShouldNotAlmostEqual, 111.0, 10.5)) + + // ints should work + fail(t, so(100, ShouldNotAlmostEqual, 100.0), "Expected '100' to NOT almost equal '100' (but it did)!") + pass(t, so(100, ShouldNotAlmostEqual, 99.0)) + + // float32 should work + fail(t, so(float64(100.0), ShouldNotAlmostEqual, float32(100.0)), "Expected '100' to NOT almost equal '100' (but it did)!") + pass(t, so(float32(100.0), ShouldNotAlmostEqual, 99.0, float32(0.1))) +} + +func TestShouldResemble(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so(Thing1{"hi"}, ShouldResemble), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(Thing1{"hi"}, ShouldResemble, Thing1{"hi"}, Thing1{"hi"}), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, 
so(Thing1{"hi"}, ShouldResemble, Thing1{"hi"})) + fail(t, so(Thing1{"hi"}, ShouldResemble, Thing1{"bye"}), `{bye}|{hi}|Expected: '"assertions.Thing1{a:\"bye\"}"' Actual: '"assertions.Thing1{a:\"hi\"}"' (Should resemble)!`) + + var ( + a []int + b []int = []int{} + ) + + fail(t, so(a, ShouldResemble, b), `[]|[]|Expected: '"[]int{}"' Actual: '"[]int(nil)"' (Should resemble)!`) + fail(t, so(2, ShouldResemble, 1), `1|2|Expected: '"1"' Actual: '"2"' (Should resemble)!`) + + fail(t, so(StringStringMapAlias{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"}), + `map[hi:bye]|map[hi:bye]|Expected: '"map[string]string{\"hi\":\"bye\"}"' Actual: '"assertions.StringStringMapAlias{\"hi\":\"bye\"}"' (Should resemble)!`) + fail(t, so(StringSliceAlias{"hi", "bye"}, ShouldResemble, []string{"hi", "bye"}), + `[hi bye]|[hi bye]|Expected: '"[]string{\"hi\", \"bye\"}"' Actual: '"assertions.StringSliceAlias{\"hi\", \"bye\"}"' (Should resemble)!`) + + // some types come out looking the same when represented with "%#v" so we show type mismatch info: + fail(t, so(StringAlias("hi"), ShouldResemble, "hi"), `hi|hi|Expected: '"\"hi\""' Actual: '"assertions.StringAlias(\"hi\")"' (Should resemble)!`) + fail(t, so(IntAlias(42), ShouldResemble, 42), `42|42|Expected: '"42"' Actual: '"assertions.IntAlias(42)"' (Should resemble)!`) +} + +func TestShouldNotResemble(t *testing.T) { + fail(t, so(Thing1{"hi"}, ShouldNotResemble), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(Thing1{"hi"}, ShouldNotResemble, Thing1{"hi"}, Thing1{"hi"}), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so(Thing1{"hi"}, ShouldNotResemble, Thing1{"bye"})) + fail(t, so(Thing1{"hi"}, ShouldNotResemble, Thing1{"hi"}), + `Expected '"assertions.Thing1{a:\"hi\"}"' to NOT resemble '"assertions.Thing1{a:\"hi\"}"' (but it did)!`) + + pass(t, so(map[string]string{"hi": "bye"}, ShouldResemble, map[string]string{"hi": "bye"})) + pass(t, so(IntAlias(42), ShouldNotResemble, 42)) + + pass(t, so(StringSliceAlias{"hi", "bye"}, ShouldNotResemble, []string{"hi", "bye"})) +} + +func TestShouldPointTo(t *testing.T) { + serializer = newFakeSerializer() + + t1 := &Thing1{} + t2 := t1 + t3 := &Thing1{} + + pointer1 := reflect.ValueOf(t1).Pointer() + pointer3 := reflect.ValueOf(t3).Pointer() + + fail(t, so(t1, ShouldPointTo), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(t1, ShouldPointTo, t2, t3), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so(t1, ShouldPointTo, t2)) + fail(t, so(t1, ShouldPointTo, t3), fmt.Sprintf( + "%v|%v|Expected '&{a:}' (address: '%v') and '&{a:}' (address: '%v') to be the same address (but their weren't)!", + pointer3, pointer1, pointer1, pointer3)) + + t4 := Thing1{} + t5 := t4 + + fail(t, so(t4, ShouldPointTo, t5), "Both arguments should be pointers (the first was not)!") + fail(t, so(&t4, ShouldPointTo, t5), "Both arguments should be pointers (the second was not)!") + fail(t, so(nil, ShouldPointTo, nil), "Both arguments should be pointers (the first was nil)!") + fail(t, so(&t4, ShouldPointTo, nil), "Both arguments should be pointers (the second was nil)!") +} + +func TestShouldNotPointTo(t *testing.T) { + t1 := &Thing1{} + t2 := t1 + t3 := &Thing1{} + + pointer1 := reflect.ValueOf(t1).Pointer() + + fail(t, so(t1, ShouldNotPointTo), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(t1, ShouldNotPointTo, t2, t3), "This assertion requires exactly 1 
comparison values (you provided 2).") + + pass(t, so(t1, ShouldNotPointTo, t3)) + fail(t, so(t1, ShouldNotPointTo, t2), fmt.Sprintf("Expected '&{a:}' and '&{a:}' to be different references (but they matched: '%v')!", pointer1)) + + t4 := Thing1{} + t5 := t4 + + fail(t, so(t4, ShouldNotPointTo, t5), "Both arguments should be pointers (the first was not)!") + fail(t, so(&t4, ShouldNotPointTo, t5), "Both arguments should be pointers (the second was not)!") + fail(t, so(nil, ShouldNotPointTo, nil), "Both arguments should be pointers (the first was nil)!") + fail(t, so(&t4, ShouldNotPointTo, nil), "Both arguments should be pointers (the second was nil)!") +} + +func TestShouldBeNil(t *testing.T) { + fail(t, so(nil, ShouldBeNil, nil, nil, nil), "This assertion requires exactly 0 comparison values (you provided 3).") + fail(t, so(nil, ShouldBeNil, nil), "This assertion requires exactly 0 comparison values (you provided 1).") + + pass(t, so(nil, ShouldBeNil)) + fail(t, so(1, ShouldBeNil), "Expected: nil Actual: '1'") + + var thing Thinger + pass(t, so(thing, ShouldBeNil)) + thing = &Thing{} + fail(t, so(thing, ShouldBeNil), "Expected: nil Actual: '&{}'") + + var thingOne *Thing1 + pass(t, so(thingOne, ShouldBeNil)) + + var nilSlice []int = nil + pass(t, so(nilSlice, ShouldBeNil)) + + var nilMap map[string]string = nil + pass(t, so(nilMap, ShouldBeNil)) + + var nilChannel chan int = nil + pass(t, so(nilChannel, ShouldBeNil)) + + var nilFunc func() = nil + pass(t, so(nilFunc, ShouldBeNil)) + + var nilInterface interface{} = nil + pass(t, so(nilInterface, ShouldBeNil)) +} + +func TestShouldNotBeNil(t *testing.T) { + fail(t, so(nil, ShouldNotBeNil, nil, nil, nil), "This assertion requires exactly 0 comparison values (you provided 3).") + fail(t, so(nil, ShouldNotBeNil, nil), "This assertion requires exactly 0 comparison values (you provided 1).") + + fail(t, so(nil, ShouldNotBeNil), "Expected '' to NOT be nil (but it was)!") + pass(t, so(1, ShouldNotBeNil)) + + var thing Thinger + fail(t, so(thing, ShouldNotBeNil), "Expected '' to NOT be nil (but it was)!") + thing = &Thing{} + pass(t, so(thing, ShouldNotBeNil)) +} + +func TestShouldBeTrue(t *testing.T) { + fail(t, so(true, ShouldBeTrue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + fail(t, so(true, ShouldBeTrue, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + + fail(t, so(false, ShouldBeTrue), "Expected: true Actual: false") + fail(t, so(1, ShouldBeTrue), "Expected: true Actual: 1") + pass(t, so(true, ShouldBeTrue)) +} + +func TestShouldBeFalse(t *testing.T) { + fail(t, so(false, ShouldBeFalse, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + fail(t, so(false, ShouldBeFalse, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + + fail(t, so(true, ShouldBeFalse), "Expected: false Actual: true") + fail(t, so(1, ShouldBeFalse), "Expected: false Actual: 1") + pass(t, so(false, ShouldBeFalse)) +} + +func TestShouldBeZeroValue(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so(0, ShouldBeZeroValue, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + fail(t, so(false, ShouldBeZeroValue, true), "This assertion requires exactly 0 comparison values (you provided 1).") + + fail(t, so(1, ShouldBeZeroValue), "0|1|'1' should have been the zero value") //"Expected: (zero value) Actual: 1") + fail(t, so(true, ShouldBeZeroValue), "false|true|'true' should have been the zero value") 
//"Expected: (zero value) Actual: true") + fail(t, so("123", ShouldBeZeroValue), "|123|'123' should have been the zero value") //"Expected: (zero value) Actual: 123") + fail(t, so(" ", ShouldBeZeroValue), "| |' ' should have been the zero value") //"Expected: (zero value) Actual: ") + fail(t, so([]string{"Nonempty"}, ShouldBeZeroValue), "[]|[Nonempty]|'[Nonempty]' should have been the zero value") //"Expected: (zero value) Actual: [Nonempty]") + fail(t, so(struct{ a string }{a: "asdf"}, ShouldBeZeroValue), "{}|{asdf}|'{a:asdf}' should have been the zero value") + pass(t, so(0, ShouldBeZeroValue)) + pass(t, so(false, ShouldBeZeroValue)) + pass(t, so("", ShouldBeZeroValue)) + pass(t, so(struct{}{}, ShouldBeZeroValue)) +} \ No newline at end of file diff --git a/vendor/github.com/smartystreets/assertions/internal/Makefile b/vendor/github.com/smartystreets/assertions/internal/Makefile new file mode 100644 index 0000000000..0894b82bd8 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/Makefile @@ -0,0 +1,23 @@ +# This Makefile pulls the latest oglematchers (with dependencies), +# rewrites the imports to match this location, +# and ensures that all the tests pass. + +go: clean clone rewrite + +clean: + rm -rf ogle* + rm -rf reqtrace + rm -rf go-render + +clone: + git clone https://github.com/jacobsa/ogletest.git && rm -rf ogletest/.git + git clone https://github.com/jacobsa/oglemock.git && rm -rf oglemock/.git + git clone https://github.com/jacobsa/oglematchers.git && rm -rf oglematchers/.git + git clone https://github.com/jacobsa/reqtrace.git && rm -rf reqtrace/.git + git clone https://github.com/luci/go-render.git && rm -rf go-render/.git + +rewrite: + grep -rl --exclude Makefile 'github.com/jacobsa' . | xargs sed -i '' 's#github.com/jacobsa#github.com/smartystreets/assertions/internal#g' + +test: + go test github.com/smartystreets/assertions/... diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/.travis.yml b/vendor/github.com/smartystreets/assertions/internal/go-render/.travis.yml new file mode 100644 index 0000000000..5a19a5faf3 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/go-render/.travis.yml @@ -0,0 +1,21 @@ +# Copyright (c) 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# {sudo: required, dist: trusty} is the magic incantation to pick the trusty +# beta environment, which is the only environment we can get that has >4GB +# memory. Currently the `go test -race` tests that we run will peak at just +# over 4GB, which results in everything getting OOM-killed. +sudo: required +dist: trusty + +language: go + +go: +- 1.4.2 + +before_install: + - go get github.com/maruel/pre-commit-go/cmd/pcg + +script: + - pcg diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py b/vendor/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py new file mode 100644 index 0000000000..d05f0cd873 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/go-render/PRESUBMIT.py @@ -0,0 +1,109 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +"""Top-level presubmit script. + +See https://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for +details on the presubmit API built into depot_tools. 
+""" + +import os +import sys + + +def PreCommitGo(input_api, output_api, pcg_mode): + """Run go-specific checks via pre-commit-go (pcg) if it's in PATH.""" + if input_api.is_committing: + error_type = output_api.PresubmitError + else: + error_type = output_api.PresubmitPromptWarning + + exe = 'pcg.exe' if sys.platform == 'win32' else 'pcg' + pcg = None + for p in os.environ['PATH'].split(os.pathsep): + pcg = os.path.join(p, exe) + if os.access(pcg, os.X_OK): + break + else: + return [ + error_type( + 'pre-commit-go executable (pcg) could not be found in PATH. All Go ' + 'checks are skipped. See https://github.com/maruel/pre-commit-go.') + ] + + cmd = [pcg, 'run', '-m', ','.join(pcg_mode)] + if input_api.verbose: + cmd.append('-v') + # pcg can figure out what files to check on its own based on upstream ref, + # but on PRESUBMIT try builder upsteram isn't set, and it's just 1 commit. + if os.getenv('PRESUBMIT_BUILDER', ''): + cmd.extend(['-r', 'HEAD~1']) + return input_api.RunTests([ + input_api.Command( + name='pre-commit-go: %s' % ', '.join(pcg_mode), + cmd=cmd, + kwargs={}, + message=error_type), + ]) + + +def header(input_api): + """Returns the expected license header regexp for this project.""" + current_year = int(input_api.time.strftime('%Y')) + allowed_years = (str(s) for s in reversed(xrange(2011, current_year + 1))) + years_re = '(' + '|'.join(allowed_years) + ')' + license_header = ( + r'.*? Copyright %(year)s The Chromium Authors\. ' + r'All rights reserved\.\n' + r'.*? Use of this source code is governed by a BSD-style license ' + r'that can be\n' + r'.*? found in the LICENSE file\.(?: \*/)?\n' + ) % { + 'year': years_re, + } + return license_header + + +def source_file_filter(input_api): + """Returns filter that selects source code files only.""" + bl = list(input_api.DEFAULT_BLACK_LIST) + [ + r'.+\.pb\.go$', + r'.+_string\.go$', + ] + wl = list(input_api.DEFAULT_WHITE_LIST) + [ + r'.+\.go$', + ] + return lambda x: input_api.FilterSourceFile(x, white_list=wl, black_list=bl) + + +def CommonChecks(input_api, output_api): + results = [] + results.extend( + input_api.canned_checks.CheckChangeHasNoStrayWhitespace( + input_api, output_api, + source_file_filter=source_file_filter(input_api))) + results.extend( + input_api.canned_checks.CheckLicense( + input_api, output_api, header(input_api), + source_file_filter=source_file_filter(input_api))) + return results + + +def CheckChangeOnUpload(input_api, output_api): + results = CommonChecks(input_api, output_api) + results.extend(PreCommitGo(input_api, output_api, ['lint', 'pre-commit'])) + return results + + +def CheckChangeOnCommit(input_api, output_api): + results = CommonChecks(input_api, output_api) + results.extend(input_api.canned_checks.CheckChangeHasDescription( + input_api, output_api)) + results.extend(input_api.canned_checks.CheckDoNotSubmitInDescription( + input_api, output_api)) + results.extend(input_api.canned_checks.CheckDoNotSubmitInFiles( + input_api, output_api)) + results.extend(PreCommitGo( + input_api, output_api, ['continuous-integration'])) + return results diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/README.md b/vendor/github.com/smartystreets/assertions/internal/go-render/README.md new file mode 100644 index 0000000000..a85380c421 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/go-render/README.md @@ -0,0 +1,78 @@ +go-render: A verbose recursive Go type-to-string conversion library. 
+==================================================================== + +[![GoDoc](https://godoc.org/github.com/luci/go-render?status.svg)](https://godoc.org/github.com/luci/go-render) +[![Build Status](https://travis-ci.org/luci/go-render.svg)](https://travis-ci.org/luci/go-render) + +This is not an official Google product. + +## Overview + +The *render* package implements a more verbose form of the standard Go string +formatter, `fmt.Sprintf("%#v", value)`, adding: + - Pointer recursion. Normally, Go stops at the first pointer and prints its + address. The *render* package will recurse and continue to render pointer + values. + - Recursion loop detection. Recursion is nice, but if a recursion path detects + a loop, *render* will note this and move on. + - Custom type name rendering. + - Deterministic key sorting for `string`- and `int`-keyed maps. + - Testing! + +Call `render.Render` and pass it an `interface{}`. + +For example: + +```Go +type customType int +type testStruct struct { + S string + V *map[string]int + I interface{} +} + +a := testStruct{ + S: "hello", + V: &map[string]int{"foo": 0, "bar": 1}, + I: customType(42), +} + +fmt.Println("Render test:") +fmt.Printf("fmt.Printf: %#v\n", a) +fmt.Printf("render.Render: %s\n", Render(a)) +``` + +Yields: +``` +fmt.Printf: render.testStruct{S:"hello", V:(*map[string]int)(0x600dd065), I:42} +render.Render: render.testStruct{S:"hello", V:(*map[string]int){"bar":1, "foo":0}, I:render.customType(42)} +``` + +This is not intended to be a high-performance library, but it's not terrible +either. + +Contributing +------------ + + * Sign the [Google CLA](https://cla.developers.google.com/clas). + * Make sure your `user.email` and `user.name` are configured in `git config`. + * Install the [pcg](https://github.com/maruel/pre-commit-go) git hook: + `go get -u github.com/maruel/pre-commit-go/cmd/... && pcg` + +Run the following to set up the code review tool and create your first review: + + git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git $HOME/src/depot_tools + export PATH="$PATH:$HOME/src/depot_tools" + cd $GOPATH/src/github.com/luci/go-render + git checkout -b work origin/master + + # hack hack + + git commit -a -m "This is awesome\nR=joe@example.com" + # This will ask for your Google Account credentials. + git cl upload -s + # Wait for LGTM over email. + # Check the commit queue box in codereview website. + # Wait for the change to be tested and landed automatically. + +Use `git cl help` and `git cl help <cmd>` for more details. diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS b/vendor/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS new file mode 100644 index 0000000000..e4172088dd --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/go-render/WATCHLISTS @@ -0,0 +1,26 @@ +# Copyright 2015 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +# Watchlist Rules +# Refer: http://dev.chromium.org/developers/contributing-code/watchlists + +{ + + 'WATCHLIST_DEFINITIONS': { + 'all': { + 'filepath': '.+', + }, + }, + + 'WATCHLISTS': { + 'all': [ + # Add yourself here to get explicitly spammed.
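+ # (The 'all' watchlist above matches every filepath, so each address below is cc'd on all changes.)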
+ 'maruel@chromium.org', + 'tandrii+luci-go@chromium.org', + 'todd@cloudera.com', + 'andrew.wang@cloudera.com', + ], + }, + +} diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml b/vendor/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml new file mode 100644 index 0000000000..074ee1f84d --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/go-render/pre-commit-go.yml @@ -0,0 +1,78 @@ +# https://github.com/maruel/pre-commit-go configuration file to run checks +# automatically on commit, on push and on continuous integration service after +# a push or on merge of a pull request. +# +# See https://godoc.org/github.com/maruel/pre-commit-go/checks for more +# information. + +min_version: 0.4.7 +modes: + continuous-integration: + checks: + build: + - build_all: false + extra_args: [] + coverage: + - use_global_inference: false + use_coveralls: true + global: + min_coverage: 50 + max_coverage: 100 + per_dir_default: + min_coverage: 1 + max_coverage: 100 + per_dir: {} + gofmt: + - {} + goimports: + - {} + test: + - extra_args: + - -v + - -race + max_duration: 600 + lint: + checks: + golint: + - blacklist: [] + govet: + - blacklist: + - ' composite literal uses unkeyed fields' + max_duration: 15 + pre-commit: + checks: + build: + - build_all: false + extra_args: [] + gofmt: + - {} + test: + - extra_args: + - -short + max_duration: 35 + pre-push: + checks: + coverage: + - use_global_inference: false + use_coveralls: false + global: + min_coverage: 50 + max_coverage: 100 + per_dir_default: + min_coverage: 1 + max_coverage: 100 + per_dir: {} + goimports: + - {} + test: + - extra_args: + - -v + - -race + max_duration: 35 + +ignore_patterns: +- .* +- _* +- '*.pb.go' +- '*_string.go' +- '*-gen.go' diff --git a/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_test.go b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_test.go new file mode 100644 index 0000000000..1737cb702a --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/go-render/render/render_test.go @@ -0,0 +1,170 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package render + +import ( + "bytes" + "fmt" + "regexp" + "runtime" + "testing" +) + +func init() { + // For testing purposes, pointers will render as "PTR" so that they are + // deterministic. + renderPointer = func(buf *bytes.Buffer, p uintptr) { + buf.WriteString("PTR") + } +} + +func assertRendersLike(t *testing.T, name string, v interface{}, exp string) { + act := Render(v) + if act != exp { + _, _, line, _ := runtime.Caller(1) + t.Errorf("On line #%d, [%s] did not match expectations:\nExpected: %s\nActual : %s\n", line, name, exp, act) + } +} + +func TestRenderList(t *testing.T) { + t.Parallel() + + // Note that we make some of the fields exportable. This is to avoid a fun case + // where the first reflect.Value has a read-only bit set, but follow-on values + // do not, so recursion tests are off by one. 
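+ // Each test case below pairs an input value with the exact string Render is
+ // expected to produce for it; pointer addresses render as "PTR" via the stub
+ // installed in init above, keeping the expectations deterministic.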
+ type testStruct struct { + Name string + I interface{} + + m string + } + + type myStringSlice []string + type myStringMap map[string]string + type myIntType int + type myStringType string + + s0 := "string0" + s0P := &s0 + mit := myIntType(42) + stringer := fmt.Stringer(nil) + + for i, tc := range []struct { + a interface{} + s string + }{ + {nil, `nil`}, + {make(chan int), `(chan int)(PTR)`}, + {&stringer, `(*fmt.Stringer)(nil)`}, + {123, `123`}, + {"hello", `"hello"`}, + {(*testStruct)(nil), `(*render.testStruct)(nil)`}, + {(**testStruct)(nil), `(**render.testStruct)(nil)`}, + {[]***testStruct(nil), `[]***render.testStruct(nil)`}, + {testStruct{Name: "foo", I: &testStruct{Name: "baz"}}, + `render.testStruct{Name:"foo", I:(*render.testStruct){Name:"baz", I:interface{}(nil), m:""}, m:""}`}, + {[]byte(nil), `[]uint8(nil)`}, + {[]byte{}, `[]uint8{}`}, + {map[string]string(nil), `map[string]string(nil)`}, + {[]*testStruct{ + {Name: "foo"}, + {Name: "bar"}, + }, `[]*render.testStruct{(*render.testStruct){Name:"foo", I:interface{}(nil), m:""}, ` + + `(*render.testStruct){Name:"bar", I:interface{}(nil), m:""}}`}, + {myStringSlice{"foo", "bar"}, `render.myStringSlice{"foo", "bar"}`}, + {myStringMap{"foo": "bar"}, `render.myStringMap{"foo":"bar"}`}, + {myIntType(12), `render.myIntType(12)`}, + {&mit, `(*render.myIntType)(42)`}, + {myStringType("foo"), `render.myStringType("foo")`}, + {struct { + a int + b string + }{123, "foo"}, `struct { a int; b string }{a:123, b:"foo"}`}, + {[]string{"foo", "foo", "bar", "baz", "qux", "qux"}, + `[]string{"foo", "foo", "bar", "baz", "qux", "qux"}`}, + {[...]int{1, 2, 3}, `[3]int{1, 2, 3}`}, + {map[string]bool{ + "foo": true, + "bar": false, + }, `map[string]bool{"bar":false, "foo":true}`}, + {map[int]string{1: "foo", 2: "bar"}, `map[int]string{1:"foo", 2:"bar"}`}, + {uint32(1337), `1337`}, + {3.14, `3.14`}, + {complex(3, 0.14), `(3+0.14i)`}, + {&s0, `(*string)("string0")`}, + {&s0P, `(**string)("string0")`}, + {[]interface{}{nil, 1, 2, nil}, `[]interface{}{interface{}(nil), 1, 2, interface{}(nil)}`}, + } { + assertRendersLike(t, fmt.Sprintf("Input #%d", i), tc.a, tc.s) + } +} + +func TestRenderRecursiveStruct(t *testing.T) { + type testStruct struct { + Name string + I interface{} + } + + s := &testStruct{ + Name: "recursive", + } + s.I = s + + assertRendersLike(t, "Recursive struct", s, + `(*render.testStruct){Name:"recursive", I:}`) +} + +func TestRenderRecursiveArray(t *testing.T) { + a := [2]interface{}{} + a[0] = &a + a[1] = &a + + assertRendersLike(t, "Recursive array", &a, + `(*[2]interface{}){, }`) +} + +func TestRenderRecursiveMap(t *testing.T) { + m := map[string]interface{}{} + foo := "foo" + m["foo"] = m + m["bar"] = [](*string){&foo, &foo} + v := []map[string]interface{}{m, m} + + assertRendersLike(t, "Recursive map", v, + `[]map[string]interface{}{map[string]interface{}{`+ + `"bar":[]*string{(*string)("foo"), (*string)("foo")}, `+ + `"foo":}, `+ + `map[string]interface{}{`+ + `"bar":[]*string{(*string)("foo"), (*string)("foo")}, `+ + `"foo":}}`) +} + +func ExampleInReadme() { + type customType int + type testStruct struct { + S string + V *map[string]int + I interface{} + } + + a := testStruct{ + S: "hello", + V: &map[string]int{"foo": 0, "bar": 1}, + I: customType(42), + } + + fmt.Println("Render test:") + fmt.Printf("fmt.Printf: %s\n", sanitizePointer(fmt.Sprintf("%#v", a))) + fmt.Printf("render.Render: %s\n", Render(a)) + // Output: Render test: + // fmt.Printf: render.testStruct{S:"hello", V:(*map[string]int)(0x600dd065), I:42} + // 
render.Render: render.testStruct{S:"hello", V:(*map[string]int){"bar":1, "foo":0}, I:render.customType(42)} +} + +var pointerRE = regexp.MustCompile(`\(0x[a-f0-9]+\)`) + +func sanitizePointer(s string) string { + return pointerRE.ReplaceAllString(s, "(0x600dd065)") +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore b/vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore new file mode 100644 index 0000000000..dd8fc7468f --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/.gitignore @@ -0,0 +1,5 @@ +*.6 +6.out +_obj/ +_test/ +_testmain.go diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml b/vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml new file mode 100644 index 0000000000..b97211926e --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/.travis.yml @@ -0,0 +1,4 @@ +# Cf. http://docs.travis-ci.com/user/getting-started/ +# Cf. http://docs.travis-ci.com/user/languages/go/ + +language: go diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go new file mode 100644 index 0000000000..0f9d198fcb --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/all_of_test.go @@ -0,0 +1,110 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" + "errors" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type allOfFakeMatcher struct { + desc string + err error +} + +func (m *allOfFakeMatcher) Matches(c interface{}) error { + return m.err +} + +func (m *allOfFakeMatcher) Description() string { + return m.desc +} + +type AllOfTest struct { +} + +func init() { RegisterTestSuite(&AllOfTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *AllOfTest) DescriptionWithEmptySet() { + m := AllOf() + ExpectEq("is anything", m.Description()) +} + +func (t *AllOfTest) DescriptionWithOneMatcher() { + m := AllOf(&allOfFakeMatcher{"taco", errors.New("")}) + ExpectEq("taco", m.Description()) +} + +func (t *AllOfTest) DescriptionWithMultipleMatchers() { + m := AllOf( + &allOfFakeMatcher{"taco", errors.New("")}, + &allOfFakeMatcher{"burrito", errors.New("")}, + &allOfFakeMatcher{"enchilada", errors.New("")}) + + ExpectEq("taco, and burrito, and enchilada", m.Description()) +} + +func (t *AllOfTest) EmptySet() { + m := AllOf() + err := m.Matches(17) + + ExpectEq(nil, err) +} + +func (t *AllOfTest) OneMatcherReturnsFatalErrorAndSomeOthersFail() { + m := AllOf( + &allOfFakeMatcher{"", errors.New("")}, + &allOfFakeMatcher{"", NewFatalError("taco")}, + &allOfFakeMatcher{"", errors.New("")}, + &allOfFakeMatcher{"", nil}) + + err := m.Matches(17) + + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("taco"))) +} + +func (t *AllOfTest) OneMatcherReturnsNonFatalAndOthersSayTrue() { + m := AllOf( + &allOfFakeMatcher{"", nil}, + &allOfFakeMatcher{"", errors.New("taco")}, + &allOfFakeMatcher{"", nil}) + + err := m.Matches(17) + + ExpectFalse(isFatal(err)) + ExpectThat(err, Error(Equals("taco"))) +} + +func (t *AllOfTest) AllMatchersSayTrue() { + m := AllOf( + &allOfFakeMatcher{"", nil}, + &allOfFakeMatcher{"", nil}, + &allOfFakeMatcher{"", nil}) + + err := m.Matches(17) + + ExpectEq(nil, err) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go new file mode 100644 index 0000000000..f0b5025406 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_of_test.go @@ -0,0 +1,139 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "errors" + + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type fakeAnyOfMatcher struct { + desc string + err error +} + +func (m *fakeAnyOfMatcher) Matches(c interface{}) error { + return m.err +} + +func (m *fakeAnyOfMatcher) Description() string { + return m.desc +} + +type AnyOfTest struct { +} + +func init() { RegisterTestSuite(&AnyOfTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *AnyOfTest) EmptySet() { + matcher := AnyOf() + + err := matcher.Matches(0) + ExpectThat(err, Error(Equals(""))) +} + +func (t *AnyOfTest) OneTrue() { + matcher := AnyOf( + &fakeAnyOfMatcher{"", NewFatalError("foo")}, + 17, + &fakeAnyOfMatcher{"", errors.New("foo")}, + &fakeAnyOfMatcher{"", nil}, + &fakeAnyOfMatcher{"", errors.New("foo")}, + ) + + err := matcher.Matches(0) + ExpectEq(nil, err) +} + +func (t *AnyOfTest) OneEqual() { + matcher := AnyOf( + &fakeAnyOfMatcher{"", NewFatalError("foo")}, + &fakeAnyOfMatcher{"", errors.New("foo")}, + 13, + "taco", + 19, + &fakeAnyOfMatcher{"", errors.New("foo")}, + ) + + err := matcher.Matches("taco") + ExpectEq(nil, err) +} + +func (t *AnyOfTest) OneFatal() { + matcher := AnyOf( + &fakeAnyOfMatcher{"", errors.New("foo")}, + 17, + &fakeAnyOfMatcher{"", NewFatalError("taco")}, + &fakeAnyOfMatcher{"", errors.New("foo")}, + ) + + err := matcher.Matches(0) + ExpectThat(err, Error(Equals("taco"))) +} + +func (t *AnyOfTest) OneNil() { + var err error + matcher := AnyOf( + 13, + nil, + 19, + ) + + // No match + err = matcher.Matches(14) + ExpectNe(nil, err) + + // Match + err = matcher.Matches(nil) + ExpectEq(nil, err) +} + +func (t *AnyOfTest) AllFalseAndNotEqual() { + matcher := AnyOf( + &fakeAnyOfMatcher{"", errors.New("foo")}, + 17, + &fakeAnyOfMatcher{"", errors.New("foo")}, + 19, + ) + + err := matcher.Matches(0) + ExpectThat(err, Error(Equals(""))) +} + +func (t *AnyOfTest) DescriptionForEmptySet() { + matcher := AnyOf() + ExpectEq("or()", matcher.Description()) +} + +func (t *AnyOfTest) DescriptionForNonEmptySet() { + matcher := AnyOf( + &fakeAnyOfMatcher{"taco", nil}, + "burrito", + &fakeAnyOfMatcher{"enchilada", nil}, + ) + + ExpectEq("or(taco, burrito, enchilada)", matcher.Description()) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_test.go new file mode 100644 index 0000000000..410cc12825 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/any_test.go @@ -0,0 +1,53 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . 
"github.com/smartystreets/assertions/internal/oglematchers" + . "github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type AnyTest struct { +} + +func init() { RegisterTestSuite(&AnyTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *AnyTest) Description() { + m := Any() + ExpectEq("is anything", m.Description()) +} + +func (t *AnyTest) Matches() { + var err error + m := Any() + + err = m.Matches(nil) + ExpectEq(nil, err) + + err = m.Matches(17) + ExpectEq(nil, err) + + err = m.Matches("taco") + ExpectEq(nil, err) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go new file mode 100644 index 0000000000..dfc981c148 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/contains_test.go @@ -0,0 +1,233 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type ContainsTest struct {} +func init() { RegisterTestSuite(&ContainsTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *ContainsTest) WrongTypeCandidates() { + m := Contains("") + ExpectEq("contains: ", m.Description()) + + var err error + + // Nil candidate + err = m.Matches(nil) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("array"))) + ExpectThat(err, Error(HasSubstr("slice"))) + + // String candidate + err = m.Matches("") + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("array"))) + ExpectThat(err, Error(HasSubstr("slice"))) + + // Map candidate + err = m.Matches(make(map[string]string)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("array"))) + ExpectThat(err, Error(HasSubstr("slice"))) +} + +func (t *ContainsTest) NilArgument() { + m := Contains(nil) + ExpectEq("contains: is nil", m.Description()) + + var c interface{} + var err error + + // Empty array of pointers + c = [...]*int{} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Empty slice of pointers + c = []*int{} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-empty array of integers + c = [...]int{17, 0, 19} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-empty slice of integers + c = []int{17, 0, 19} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching array of pointers + c = [...]*int{new(int), new(int)} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching slice of pointers + c = []*int{new(int), new(int)} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Matching array of pointers + c = [...]*int{new(int), nil, new(int)} + err = m.Matches(c) + ExpectEq(nil, err) + + // Matching slice of pointers + c = []*int{new(int), nil, new(int)} + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching slice of pointers from matching array + someArray := [...]*int{new(int), nil, new(int)} + c = someArray[0:1] + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +func (t *ContainsTest) StringArgument() { + m := Contains("taco") + ExpectEq("contains: taco", m.Description()) + + var c interface{} + var err error + + // Non-matching array of strings + c = [...]string{"burrito", "enchilada"} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching slice of strings + c = []string{"burrito", "enchilada"} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Matching array of strings + c = [...]string{"burrito", "taco", "enchilada"} + err = m.Matches(c) + ExpectEq(nil, err) + + // Matching slice of strings + c = []string{"burrito", "taco", "enchilada"} + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching slice of strings from matching array + someArray := [...]string{"burrito", "taco", "enchilada"} + c = someArray[0:1] + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +func (t *ContainsTest) IntegerArgument() { + m := Contains(int(17)) + ExpectEq("contains: 17", m.Description()) + + var c interface{} + var err error + + // Non-matching array of integers + c = [...]int{13, 19} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching slice of 
integers + c = []int{13, 19} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Matching array of integers + c = [...]int{13, 17, 19} + err = m.Matches(c) + ExpectEq(nil, err) + + // Matching slice of integers + c = []int{13, 17, 19} + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching slice of integers from matching array + someArray := [...]int{13, 17, 19} + c = someArray[0:1] + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching array of floats + c = [...]float32{13, 17.5, 19} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching slice of floats + c = []float32{13, 17.5, 19} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Matching array of floats + c = [...]float32{13, 17, 19} + err = m.Matches(c) + ExpectEq(nil, err) + + // Matching slice of floats + c = []float32{13, 17, 19} + err = m.Matches(c) + ExpectEq(nil, err) +} + +func (t *ContainsTest) MatcherArgument() { + m := Contains(HasSubstr("ac")) + ExpectEq("contains: has substring \"ac\"", m.Description()) + + var c interface{} + var err error + + // Non-matching array of strings + c = [...]string{"burrito", "enchilada"} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Non-matching slice of strings + c = []string{"burrito", "enchilada"} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Matching array of strings + c = [...]string{"burrito", "taco", "enchilada"} + err = m.Matches(c) + ExpectEq(nil, err) + + // Matching slice of strings + c = []string{"burrito", "taco", "enchilada"} + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching slice of strings from matching array + someArray := [...]string{"burrito", "taco", "enchilada"} + c = someArray[0:1] + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go new file mode 100644 index 0000000000..a28113aaa6 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/deep_equals_test.go @@ -0,0 +1,343 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" + "bytes" + "testing" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type DeepEqualsTest struct {} +func init() { RegisterTestSuite(&DeepEqualsTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *DeepEqualsTest) WrongTypeCandidateWithScalarValue() { + var x int = 17 + m := DeepEquals(x) + + var err error + + // Nil candidate. + err = m.Matches(nil) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr(""))) + + // Int alias candidate. + type intAlias int + err = m.Matches(intAlias(x)) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("intAlias"))) + + // String candidate. + err = m.Matches("taco") + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("string"))) + + // Byte slice candidate. + err = m.Matches([]byte{}) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint8"))) + + // Other slice candidate. + err = m.Matches([]uint16{}) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint16"))) + + // Unsigned int candidate. + err = m.Matches(uint(17)) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("uint"))) +} + +func (t *DeepEqualsTest) WrongTypeCandidateWithByteSliceValue() { + x := []byte{} + m := DeepEquals(x) + + var err error + + // Nil candidate. + err = m.Matches(nil) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr(""))) + + // String candidate. + err = m.Matches("taco") + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("string"))) + + // Slice candidate with wrong value type. + err = m.Matches([]uint16{}) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint16"))) +} + +func (t *DeepEqualsTest) WrongTypeCandidateWithOtherSliceValue() { + x := []uint16{} + m := DeepEquals(x) + + var err error + + // Nil candidate. + err = m.Matches(nil) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr(""))) + + // String candidate. + err = m.Matches("taco") + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("string"))) + + // Byte slice candidate with wrong value type. + err = m.Matches([]byte{}) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint8"))) + + // Other slice candidate with wrong value type. 
+ err = m.Matches([]uint32{}) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint32"))) +} + +func (t *DeepEqualsTest) WrongTypeCandidateWithNilLiteralValue() { + m := DeepEquals(nil) + + var err error + + // String candidate. + err = m.Matches("taco") + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("string"))) + + // Nil byte slice candidate. + err = m.Matches([]byte(nil)) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint8"))) + + // Nil other slice candidate. + err = m.Matches([]uint16(nil)) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("type"))) + ExpectThat(err, Error(HasSubstr("[]uint16"))) +} + +func (t *DeepEqualsTest) NilLiteralValue() { + m := DeepEquals(nil) + ExpectEq("deep equals: <nil>", m.Description()) + + var c interface{} + var err error + + // Nil literal candidate. + c = nil + err = m.Matches(c) + ExpectEq(nil, err) +} + +func (t *DeepEqualsTest) IntValue() { + m := DeepEquals(int(17)) + ExpectEq("deep equals: 17", m.Description()) + + var c interface{} + var err error + + // Matching int. + c = int(17) + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching int. + c = int(18) + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +func (t *DeepEqualsTest) ByteSliceValue() { + x := []byte{17, 19} + m := DeepEquals(x) + ExpectEq("deep equals: [17 19]", m.Description()) + + var c []byte + var err error + + // Matching. + c = make([]byte, len(x)) + AssertEq(len(x), copy(c, x)) + + err = m.Matches(c) + ExpectEq(nil, err) + + // Nil slice. + c = []byte(nil) + err = m.Matches(c) + ExpectThat(err, Error(Equals("which is nil"))) + + // Prefix. + AssertGt(len(x), 1) + c = make([]byte, len(x)-1) + AssertEq(len(x)-1, copy(c, x)) + + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Suffix. + c = make([]byte, len(x)+1) + AssertEq(len(x), copy(c, x)) + + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +func (t *DeepEqualsTest) OtherSliceValue() { + x := []uint16{17, 19} + m := DeepEquals(x) + ExpectEq("deep equals: [17 19]", m.Description()) + + var c []uint16 + var err error + + // Matching. + c = make([]uint16, len(x)) + AssertEq(len(x), copy(c, x)) + + err = m.Matches(c) + ExpectEq(nil, err) + + // Nil slice. + c = []uint16(nil) + err = m.Matches(c) + ExpectThat(err, Error(Equals("which is nil"))) + + // Prefix. + AssertGt(len(x), 1) + c = make([]uint16, len(x)-1) + AssertEq(len(x)-1, copy(c, x)) + + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) + + // Suffix. + c = make([]uint16, len(x)+1) + AssertEq(len(x), copy(c, x)) + + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +func (t *DeepEqualsTest) NilByteSliceValue() { + x := []byte(nil) + m := DeepEquals(x) + ExpectEq("deep equals: ", m.Description()) + + var c []byte + var err error + + // Nil slice. + c = []byte(nil) + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-nil slice. + c = []byte{} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +func (t *DeepEqualsTest) NilOtherSliceValue() { + x := []uint16(nil) + m := DeepEquals(x) + ExpectEq("deep equals: ", m.Description()) + + var c []uint16 + var err error + + // Nil slice. + c = []uint16(nil) + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-nil slice.
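+ // (a non-nil empty slice is not deeply equal to a nil slice, so this must fail)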
+ c = []uint16{} + err = m.Matches(c) + ExpectThat(err, Error(Equals(""))) +} + +//////////////////////////////////////////////////////////////////////// +// Benchmarks +//////////////////////////////////////////////////////////////////////// + +func benchmarkWithSize(b *testing.B, size int) { + b.StopTimer() + buf := bytes.Repeat([]byte{0x01}, size) + bufCopy := make([]byte, size) + copy(bufCopy, buf) + + matcher := DeepEquals(buf) + b.StartTimer() + + for i := 0; i < b.N; i++ { + matcher.Matches(bufCopy) + } + + b.SetBytes(int64(size)) +} + +func BenchmarkShortByteSlice(b *testing.B) { + benchmarkWithSize(b, 256) +} + +func BenchmarkLongByteSlice(b *testing.B) { + benchmarkWithSize(b, 1<<24) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go new file mode 100644 index 0000000000..172584fa14 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/elements_are_test.go @@ -0,0 +1,208 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . "github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type ElementsAreTest struct { +} + +func init() { RegisterTestSuite(&ElementsAreTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *ElementsAreTest) EmptySet() { + m := ElementsAre() + ExpectEq("elements are: []", m.Description()) + + var c []interface{} + var err error + + // No candidates. + c = []interface{}{} + err = m.Matches(c) + ExpectEq(nil, err) + + // One candidate. + c = []interface{}{17} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 1"))) +} + +func (t *ElementsAreTest) OneMatcher() { + m := ElementsAre(LessThan(17)) + ExpectEq("elements are: [less than 17]", m.Description()) + + var c []interface{} + var err error + + // No candidates. + c = []interface{}{} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 0"))) + + // Matching candidate. + c = []interface{}{16} + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching candidate. + c = []interface{}{19} + err = m.Matches(c) + ExpectNe(nil, err) + + // Two candidates. + c = []interface{}{17, 19} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 2"))) +} + +func (t *ElementsAreTest) OneValue() { + m := ElementsAre(17) + ExpectEq("elements are: [17]", m.Description()) + + var c []interface{} + var err error + + // No candidates. 
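+ // (ElementsAre(17) requires exactly one element, so the empty candidate fails with a length error)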
+ c = []interface{}{} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 0"))) + + // Matching int. + c = []interface{}{int(17)} + err = m.Matches(c) + ExpectEq(nil, err) + + // Matching float. + c = []interface{}{float32(17)} + err = m.Matches(c) + ExpectEq(nil, err) + + // Non-matching candidate. + c = []interface{}{19} + err = m.Matches(c) + ExpectNe(nil, err) + + // Two candidates. + c = []interface{}{17, 19} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 2"))) +} + +func (t *ElementsAreTest) MultipleElements() { + m := ElementsAre("taco", LessThan(17)) + ExpectEq("elements are: [taco, less than 17]", m.Description()) + + var c []interface{} + var err error + + // One candidate. + c = []interface{}{17} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 1"))) + + // Both matching. + c = []interface{}{"taco", 16} + err = m.Matches(c) + ExpectEq(nil, err) + + // First non-matching. + c = []interface{}{"burrito", 16} + err = m.Matches(c) + ExpectThat(err, Error(Equals("whose element 0 doesn't match"))) + + // Second non-matching. + c = []interface{}{"taco", 17} + err = m.Matches(c) + ExpectThat(err, Error(Equals("whose element 1 doesn't match"))) + + // Three candidates. + c = []interface{}{"taco", 17, 19} + err = m.Matches(c) + ExpectThat(err, Error(HasSubstr("length 3"))) +} + +func (t *ElementsAreTest) ArrayCandidates() { + m := ElementsAre("taco", LessThan(17)) + + var err error + + // One candidate. + err = m.Matches([1]interface{}{"taco"}) + ExpectThat(err, Error(HasSubstr("length 1"))) + + // Both matching. + err = m.Matches([2]interface{}{"taco", 16}) + ExpectEq(nil, err) + + // First non-matching. + err = m.Matches([2]interface{}{"burrito", 16}) + ExpectThat(err, Error(Equals("whose element 0 doesn't match"))) +} + +func (t *ElementsAreTest) WrongTypeCandidate() { + m := ElementsAre("taco") + + var err error + + // String candidate. + err = m.Matches("taco") + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("array"))) + ExpectThat(err, Error(HasSubstr("slice"))) + + // Map candidate. + err = m.Matches(map[string]string{}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("array"))) + ExpectThat(err, Error(HasSubstr("slice"))) + + // Nil candidate. + err = m.Matches(nil) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("array"))) + ExpectThat(err, Error(HasSubstr("slice"))) +} + +func (t *ElementsAreTest) PropagatesFatality() { + m := ElementsAre(LessThan(17)) + ExpectEq("elements are: [less than 17]", m.Description()) + + var c []interface{} + var err error + + // Non-fatal error. + c = []interface{}{19} + err = m.Matches(c) + AssertNe(nil, err) + ExpectFalse(isFatal(err)) + + // Fatal error. + c = []interface{}{"taco"} + err = m.Matches(c) + AssertNe(nil, err) + ExpectTrue(isFatal(err)) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go new file mode 100644 index 0000000000..6ac5df2732 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/equals_test.go @@ -0,0 +1,3864 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "fmt" + "math" + "unsafe" + + . "github.com/smartystreets/assertions/internal/oglematchers" + . "github.com/smartystreets/assertions/internal/ogletest" +) + +var someInt int = -17 + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type EqualsTest struct { +} + +func init() { RegisterTestSuite(&EqualsTest{}) } + +type equalsTestCase struct { + candidate interface{} + expectedResult bool + shouldBeFatal bool + expectedError string +} + +func (t *EqualsTest) checkTestCases(matcher Matcher, cases []equalsTestCase) { + for i, c := range cases { + err := matcher.Matches(c.candidate) + ExpectEq( + c.expectedResult, + (err == nil), + "Result for case %d: %v (Error: %v)", i, c, err) + + if err == nil { + continue + } + + _, isFatal := err.(*FatalError) + ExpectEq(c.shouldBeFatal, isFatal, "Fatality for case %d: %v", i, c) + + ExpectThat(err, Error(Equals(c.expectedError)), "Case %d: %v", i, c) + } +} + +//////////////////////////////////////////////////////////////////////// +// nil +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) EqualsNil() { + matcher := Equals(nil) + ExpectEq("is nil", matcher.Description()) + + cases := []equalsTestCase{ + // Legal types + equalsTestCase{nil, true, false, ""}, + equalsTestCase{(chan int)(nil), true, false, ""}, + equalsTestCase{(func())(nil), true, false, ""}, + equalsTestCase{interface{}(nil), true, false, ""}, + equalsTestCase{map[int]int(nil), true, false, ""}, + equalsTestCase{(*int)(nil), true, false, ""}, + equalsTestCase{[]int(nil), true, false, ""}, + + equalsTestCase{make(chan int), false, false, ""}, + equalsTestCase{func() {}, false, false, ""}, + equalsTestCase{map[int]int{}, false, false, ""}, + equalsTestCase{&someInt, false, false, ""}, + equalsTestCase{[]int{}, false, false, ""}, + + // Illegal types + equalsTestCase{17, false, true, "which cannot be compared to nil"}, + equalsTestCase{int8(17), false, true, "which cannot be compared to nil"}, + equalsTestCase{uintptr(17), false, true, "which cannot be compared to nil"}, + equalsTestCase{[...]int{}, false, true, "which cannot be compared to nil"}, + equalsTestCase{"taco", false, true, "which cannot be compared to nil"}, + equalsTestCase{equalsTestCase{}, false, true, "which cannot be compared to nil"}, + equalsTestCase{unsafe.Pointer(&someInt), false, true, "which cannot be compared to nil"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeIntegerLiteral() { + // -2^30 + matcher := Equals(-1073741824) + ExpectEq("-1073741824", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -1073741824.
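+ // (Equals compares numeric values mathematically, so integer, float, and complex representations of the same value all match.)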
+ equalsTestCase{-1073741824, true, false, ""}, + equalsTestCase{-1073741824.0, true, false, ""}, + equalsTestCase{-1073741824 + 0i, true, false, ""}, + equalsTestCase{int(-1073741824), true, false, ""}, + equalsTestCase{int32(-1073741824), true, false, ""}, + equalsTestCase{int64(-1073741824), true, false, ""}, + equalsTestCase{float32(-1073741824), true, false, ""}, + equalsTestCase{float64(-1073741824), true, false, ""}, + equalsTestCase{complex64(-1073741824), true, false, ""}, + equalsTestCase{complex128(-1073741824), true, false, ""}, + equalsTestCase{interface{}(int(-1073741824)), true, false, ""}, + + // Values that would be -1073741824 in two's complement. + equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""}, + + // Non-equal values of signed integer type. + equalsTestCase{int(-1073741823), false, false, ""}, + equalsTestCase{int32(-1073741823), false, false, ""}, + equalsTestCase{int64(-1073741823), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float64(-1073741824.1), false, false, ""}, + equalsTestCase{float64(-1073741823.9), false, false, ""}, + equalsTestCase{complex128(-1073741823), false, false, ""}, + equalsTestCase{complex128(-1073741824 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveIntegerLiteral() { + // 2^30 + matcher := Equals(1073741824) + ExpectEq("1073741824", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 1073741824. + equalsTestCase{1073741824, true, false, ""}, + equalsTestCase{1073741824.0, true, false, ""}, + equalsTestCase{1073741824 + 0i, true, false, ""}, + equalsTestCase{int(1073741824), true, false, ""}, + equalsTestCase{uint(1073741824), true, false, ""}, + equalsTestCase{int32(1073741824), true, false, ""}, + equalsTestCase{int64(1073741824), true, false, ""}, + equalsTestCase{uint32(1073741824), true, false, ""}, + equalsTestCase{uint64(1073741824), true, false, ""}, + equalsTestCase{uintptr(1073741824), true, false, ""}, + equalsTestCase{float32(1073741824), true, false, ""}, + equalsTestCase{float64(1073741824), true, false, ""}, + equalsTestCase{complex64(1073741824), true, false, ""}, + equalsTestCase{complex128(1073741824), true, false, ""}, + equalsTestCase{interface{}(int(1073741824)), true, false, ""}, + equalsTestCase{interface{}(uint(1073741824)), true, false, ""}, + + // Non-equal values of numeric type. 
+ equalsTestCase{int(1073741823), false, false, ""}, + equalsTestCase{int32(1073741823), false, false, ""}, + equalsTestCase{int64(1073741823), false, false, ""}, + equalsTestCase{float64(1073741824.1), false, false, ""}, + equalsTestCase{float64(1073741823.9), false, false, ""}, + equalsTestCase{complex128(1073741823), false, false, ""}, + equalsTestCase{complex128(1073741824 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Floating point literals +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeIntegralFloatingPointLiteral() { + // -2^30 + matcher := Equals(-1073741824.0) + ExpectEq("-1.073741824e+09", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -1073741824. + equalsTestCase{-1073741824, true, false, ""}, + equalsTestCase{-1073741824.0, true, false, ""}, + equalsTestCase{-1073741824 + 0i, true, false, ""}, + equalsTestCase{int(-1073741824), true, false, ""}, + equalsTestCase{int32(-1073741824), true, false, ""}, + equalsTestCase{int64(-1073741824), true, false, ""}, + equalsTestCase{float32(-1073741824), true, false, ""}, + equalsTestCase{float64(-1073741824), true, false, ""}, + equalsTestCase{complex64(-1073741824), true, false, ""}, + equalsTestCase{complex128(-1073741824), true, false, ""}, + equalsTestCase{interface{}(int(-1073741824)), true, false, ""}, + equalsTestCase{interface{}(float64(-1073741824)), true, false, ""}, + + // Values that would be -1073741824 in two's complement. + equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""}, + + // Non-equal values of signed integer type. + equalsTestCase{int(-1073741823), false, false, ""}, + equalsTestCase{int32(-1073741823), false, false, ""}, + equalsTestCase{int64(-1073741823), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float64(-1073741824.1), false, false, ""}, + equalsTestCase{float64(-1073741823.9), false, false, ""}, + equalsTestCase{complex128(-1073741823), false, false, ""}, + equalsTestCase{complex128(-1073741824 + 2i), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloatingPointLiteral() {
+ // 2^30
+ matcher := Equals(1073741824.0)
+ ExpectEq("1.073741824e+09", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1073741824.
+ equalsTestCase{1073741824, true, false, ""},
+ equalsTestCase{1073741824.0, true, false, ""},
+ equalsTestCase{1073741824 + 0i, true, false, ""},
+ equalsTestCase{int(1073741824), true, false, ""},
+ equalsTestCase{int32(1073741824), true, false, ""},
+ equalsTestCase{int64(1073741824), true, false, ""},
+ equalsTestCase{uint(1073741824), true, false, ""},
+ equalsTestCase{uint32(1073741824), true, false, ""},
+ equalsTestCase{uint64(1073741824), true, false, ""},
+ equalsTestCase{float32(1073741824), true, false, ""},
+ equalsTestCase{float64(1073741824), true, false, ""},
+ equalsTestCase{complex64(1073741824), true, false, ""},
+ equalsTestCase{complex128(1073741824), true, false, ""},
+ equalsTestCase{interface{}(int(1073741824)), true, false, ""},
+ equalsTestCase{interface{}(float64(1073741824)), true, false, ""},
+
+ // Values that would be -1073741824 in two's complement.
+ equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""},
+ equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""},
+ equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int(1073741823), false, false, ""},
+ equalsTestCase{int32(1073741823), false, false, ""},
+ equalsTestCase{int64(1073741823), false, false, ""},
+ equalsTestCase{uint(1073741823), false, false, ""},
+ equalsTestCase{uint32(1073741823), false, false, ""},
+ equalsTestCase{uint64(1073741823), false, false, ""},
+ equalsTestCase{float64(1073741824.1), false, false, ""},
+ equalsTestCase{float64(1073741823.9), false, false, ""},
+ equalsTestCase{complex128(1073741823), false, false, ""},
+ equalsTestCase{complex128(1073741824 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) NonIntegralFloatingPointLiteral() {
+ matcher := Equals(17.1)
+ ExpectEq("17.1", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 17.1.
+ equalsTestCase{17.1, true, false, ""}, + equalsTestCase{17.1, true, false, ""}, + equalsTestCase{17.1 + 0i, true, false, ""}, + equalsTestCase{float32(17.1), true, false, ""}, + equalsTestCase{float64(17.1), true, false, ""}, + equalsTestCase{complex64(17.1), true, false, ""}, + equalsTestCase{complex128(17.1), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{17, false, false, ""}, + equalsTestCase{17.2, false, false, ""}, + equalsTestCase{18, false, false, ""}, + equalsTestCase{int(17), false, false, ""}, + equalsTestCase{int(18), false, false, ""}, + equalsTestCase{int32(17), false, false, ""}, + equalsTestCase{int64(17), false, false, ""}, + equalsTestCase{uint(17), false, false, ""}, + equalsTestCase{uint32(17), false, false, ""}, + equalsTestCase{uint64(17), false, false, ""}, + equalsTestCase{uintptr(17), false, false, ""}, + equalsTestCase{complex128(17.1 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// bool +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) False() { + matcher := Equals(false) + ExpectEq("false", matcher.Description()) + + cases := []equalsTestCase{ + // bools + equalsTestCase{false, true, false, ""}, + equalsTestCase{bool(false), true, false, ""}, + + equalsTestCase{true, false, false, ""}, + equalsTestCase{bool(true), false, false, ""}, + + // Other types. 
+ equalsTestCase{int(0), false, true, "which is not a bool"}, + equalsTestCase{int8(0), false, true, "which is not a bool"}, + equalsTestCase{int16(0), false, true, "which is not a bool"}, + equalsTestCase{int32(0), false, true, "which is not a bool"}, + equalsTestCase{int64(0), false, true, "which is not a bool"}, + equalsTestCase{uint(0), false, true, "which is not a bool"}, + equalsTestCase{uint8(0), false, true, "which is not a bool"}, + equalsTestCase{uint16(0), false, true, "which is not a bool"}, + equalsTestCase{uint32(0), false, true, "which is not a bool"}, + equalsTestCase{uint64(0), false, true, "which is not a bool"}, + equalsTestCase{uintptr(0), false, true, "which is not a bool"}, + equalsTestCase{[...]int{}, false, true, "which is not a bool"}, + equalsTestCase{make(chan int), false, true, "which is not a bool"}, + equalsTestCase{func() {}, false, true, "which is not a bool"}, + equalsTestCase{map[int]int{}, false, true, "which is not a bool"}, + equalsTestCase{&someInt, false, true, "which is not a bool"}, + equalsTestCase{[]int{}, false, true, "which is not a bool"}, + equalsTestCase{"taco", false, true, "which is not a bool"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a bool"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) True() { + matcher := Equals(true) + ExpectEq("true", matcher.Description()) + + cases := []equalsTestCase{ + // bools + equalsTestCase{true, true, false, ""}, + equalsTestCase{bool(true), true, false, ""}, + + equalsTestCase{false, false, false, ""}, + equalsTestCase{bool(false), false, false, ""}, + + // Other types. + equalsTestCase{int(1), false, true, "which is not a bool"}, + equalsTestCase{int8(1), false, true, "which is not a bool"}, + equalsTestCase{int16(1), false, true, "which is not a bool"}, + equalsTestCase{int32(1), false, true, "which is not a bool"}, + equalsTestCase{int64(1), false, true, "which is not a bool"}, + equalsTestCase{uint(1), false, true, "which is not a bool"}, + equalsTestCase{uint8(1), false, true, "which is not a bool"}, + equalsTestCase{uint16(1), false, true, "which is not a bool"}, + equalsTestCase{uint32(1), false, true, "which is not a bool"}, + equalsTestCase{uint64(1), false, true, "which is not a bool"}, + equalsTestCase{uintptr(1), false, true, "which is not a bool"}, + equalsTestCase{[...]int{}, false, true, "which is not a bool"}, + equalsTestCase{make(chan int), false, true, "which is not a bool"}, + equalsTestCase{func() {}, false, true, "which is not a bool"}, + equalsTestCase{map[int]int{}, false, true, "which is not a bool"}, + equalsTestCase{&someInt, false, true, "which is not a bool"}, + equalsTestCase{[]int{}, false, true, "which is not a bool"}, + equalsTestCase{"taco", false, true, "which is not a bool"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a bool"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// int +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeInt() { + // -2^30 + matcher := Equals(int(-1073741824)) + ExpectEq("-1073741824", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -1073741824. 
+ equalsTestCase{-1073741824, true, false, ""}, + equalsTestCase{-1073741824.0, true, false, ""}, + equalsTestCase{-1073741824 + 0i, true, false, ""}, + equalsTestCase{int(-1073741824), true, false, ""}, + equalsTestCase{int32(-1073741824), true, false, ""}, + equalsTestCase{int64(-1073741824), true, false, ""}, + equalsTestCase{float32(-1073741824), true, false, ""}, + equalsTestCase{float64(-1073741824), true, false, ""}, + equalsTestCase{complex64(-1073741824), true, false, ""}, + equalsTestCase{complex128(-1073741824), true, false, ""}, + equalsTestCase{interface{}(int(-1073741824)), true, false, ""}, + + // Values that would be -1073741824 in two's complement. + equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""}, + + // Non-equal values of signed integer type. + equalsTestCase{int(-1073741823), false, false, ""}, + equalsTestCase{int32(-1073741823), false, false, ""}, + equalsTestCase{int64(-1073741823), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float64(-1073741824.1), false, false, ""}, + equalsTestCase{float64(-1073741823.9), false, false, ""}, + equalsTestCase{complex128(-1073741823), false, false, ""}, + equalsTestCase{complex128(-1073741824 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveInt() { + // 2^30 + matcher := Equals(int(1073741824)) + ExpectEq("1073741824", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 1073741824. + equalsTestCase{1073741824, true, false, ""}, + equalsTestCase{1073741824.0, true, false, ""}, + equalsTestCase{1073741824 + 0i, true, false, ""}, + equalsTestCase{int(1073741824), true, false, ""}, + equalsTestCase{uint(1073741824), true, false, ""}, + equalsTestCase{int32(1073741824), true, false, ""}, + equalsTestCase{int64(1073741824), true, false, ""}, + equalsTestCase{uint32(1073741824), true, false, ""}, + equalsTestCase{uint64(1073741824), true, false, ""}, + equalsTestCase{uintptr(1073741824), true, false, ""}, + equalsTestCase{float32(1073741824), true, false, ""}, + equalsTestCase{float64(1073741824), true, false, ""}, + equalsTestCase{complex64(1073741824), true, false, ""}, + equalsTestCase{complex128(1073741824), true, false, ""}, + equalsTestCase{interface{}(int(1073741824)), true, false, ""}, + equalsTestCase{interface{}(uint(1073741824)), true, false, ""}, + + // Non-equal values of numeric type. 
+ equalsTestCase{int(1073741823), false, false, ""}, + equalsTestCase{int32(1073741823), false, false, ""}, + equalsTestCase{int64(1073741823), false, false, ""}, + equalsTestCase{float64(1073741824.1), false, false, ""}, + equalsTestCase{float64(1073741823.9), false, false, ""}, + equalsTestCase{complex128(1073741823), false, false, ""}, + equalsTestCase{complex128(1073741824 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// int8 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeInt8() { + matcher := Equals(int8(-17)) + ExpectEq("-17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -17. + equalsTestCase{-17, true, false, ""}, + equalsTestCase{-17.0, true, false, ""}, + equalsTestCase{-17 + 0i, true, false, ""}, + equalsTestCase{int(-17), true, false, ""}, + equalsTestCase{int8(-17), true, false, ""}, + equalsTestCase{int16(-17), true, false, ""}, + equalsTestCase{int32(-17), true, false, ""}, + equalsTestCase{int64(-17), true, false, ""}, + equalsTestCase{float32(-17), true, false, ""}, + equalsTestCase{float64(-17), true, false, ""}, + equalsTestCase{complex64(-17), true, false, ""}, + equalsTestCase{complex128(-17), true, false, ""}, + equalsTestCase{interface{}(int(-17)), true, false, ""}, + + // Values that would be -17 in two's complement. + equalsTestCase{uint((1 << 32) - 17), false, false, ""}, + equalsTestCase{uint8((1 << 8) - 17), false, false, ""}, + equalsTestCase{uint16((1 << 16) - 17), false, false, ""}, + equalsTestCase{uint32((1 << 32) - 17), false, false, ""}, + equalsTestCase{uint64((1 << 64) - 17), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 17), false, false, ""}, + + // Non-equal values of signed integer type. + equalsTestCase{int(-16), false, false, ""}, + equalsTestCase{int8(-16), false, false, ""}, + equalsTestCase{int16(-16), false, false, ""}, + equalsTestCase{int32(-16), false, false, ""}, + equalsTestCase{int64(-16), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float32(-17.1), false, false, ""}, + equalsTestCase{float32(-16.9), false, false, ""}, + equalsTestCase{complex64(-16), false, false, ""}, + equalsTestCase{complex64(-17 + 2i), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{-17}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{-17}, false, true, "which is not numeric"}, + equalsTestCase{"-17", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ZeroInt8() { + matcher := Equals(int8(0)) + ExpectEq("0", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 0. + equalsTestCase{0, true, false, ""}, + equalsTestCase{0.0, true, false, ""}, + equalsTestCase{0 + 0i, true, false, ""}, + equalsTestCase{int(0), true, false, ""}, + equalsTestCase{int8(0), true, false, ""}, + equalsTestCase{int16(0), true, false, ""}, + equalsTestCase{int32(0), true, false, ""}, + equalsTestCase{int64(0), true, false, ""}, + equalsTestCase{float32(0), true, false, ""}, + equalsTestCase{float64(0), true, false, ""}, + equalsTestCase{complex64(0), true, false, ""}, + equalsTestCase{complex128(0), true, false, ""}, + equalsTestCase{interface{}(int(0)), true, false, ""}, + equalsTestCase{uint(0), true, false, ""}, + equalsTestCase{uint8(0), true, false, ""}, + equalsTestCase{uint16(0), true, false, ""}, + equalsTestCase{uint32(0), true, false, ""}, + equalsTestCase{uint64(0), true, false, ""}, + equalsTestCase{uintptr(0), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(1), false, false, ""}, + equalsTestCase{int8(1), false, false, ""}, + equalsTestCase{int16(1), false, false, ""}, + equalsTestCase{int32(1), false, false, ""}, + equalsTestCase{int64(1), false, false, ""}, + equalsTestCase{float32(-0.1), false, false, ""}, + equalsTestCase{float32(0.1), false, false, ""}, + equalsTestCase{complex64(1), false, false, ""}, + equalsTestCase{complex64(0 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{0}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{0}, false, true, "which is not numeric"}, + equalsTestCase{"0", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveInt8() { + matcher := Equals(int8(17)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 17. 
+ equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(17), true, false, ""}, + equalsTestCase{int8(17), true, false, ""}, + equalsTestCase{int16(17), true, false, ""}, + equalsTestCase{int32(17), true, false, ""}, + equalsTestCase{int64(17), true, false, ""}, + equalsTestCase{float32(17), true, false, ""}, + equalsTestCase{float64(17), true, false, ""}, + equalsTestCase{complex64(17), true, false, ""}, + equalsTestCase{complex128(17), true, false, ""}, + equalsTestCase{interface{}(int(17)), true, false, ""}, + equalsTestCase{uint(17), true, false, ""}, + equalsTestCase{uint8(17), true, false, ""}, + equalsTestCase{uint16(17), true, false, ""}, + equalsTestCase{uint32(17), true, false, ""}, + equalsTestCase{uint64(17), true, false, ""}, + equalsTestCase{uintptr(17), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(16), false, false, ""}, + equalsTestCase{int8(16), false, false, ""}, + equalsTestCase{int16(16), false, false, ""}, + equalsTestCase{int32(16), false, false, ""}, + equalsTestCase{int64(16), false, false, ""}, + equalsTestCase{float32(16.9), false, false, ""}, + equalsTestCase{float32(17.1), false, false, ""}, + equalsTestCase{complex64(16), false, false, ""}, + equalsTestCase{complex64(17 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{17}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{17}, false, true, "which is not numeric"}, + equalsTestCase{"17", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// int16 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeInt16() { + matcher := Equals(int16(-32766)) + ExpectEq("-32766", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -32766. + equalsTestCase{-32766, true, false, ""}, + equalsTestCase{-32766.0, true, false, ""}, + equalsTestCase{-32766 + 0i, true, false, ""}, + equalsTestCase{int(-32766), true, false, ""}, + equalsTestCase{int16(-32766), true, false, ""}, + equalsTestCase{int32(-32766), true, false, ""}, + equalsTestCase{int64(-32766), true, false, ""}, + equalsTestCase{float32(-32766), true, false, ""}, + equalsTestCase{float64(-32766), true, false, ""}, + equalsTestCase{complex64(-32766), true, false, ""}, + equalsTestCase{complex128(-32766), true, false, ""}, + equalsTestCase{interface{}(int(-32766)), true, false, ""}, + + // Values that would be -32766 in two's complement. + equalsTestCase{uint((1 << 32) - 32766), false, false, ""}, + equalsTestCase{uint16((1 << 16) - 32766), false, false, ""}, + equalsTestCase{uint32((1 << 32) - 32766), false, false, ""}, + equalsTestCase{uint64((1 << 64) - 32766), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 32766), false, false, ""}, + + // Non-equal values of signed integer type. 
+ equalsTestCase{int(-16), false, false, ""}, + equalsTestCase{int8(-16), false, false, ""}, + equalsTestCase{int16(-16), false, false, ""}, + equalsTestCase{int32(-16), false, false, ""}, + equalsTestCase{int64(-16), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float32(-32766.1), false, false, ""}, + equalsTestCase{float32(-32765.9), false, false, ""}, + equalsTestCase{complex64(-32766.1), false, false, ""}, + equalsTestCase{complex64(-32766 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{-32766}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{-32766}, false, true, "which is not numeric"}, + equalsTestCase{"-32766", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ZeroInt16() { + matcher := Equals(int16(0)) + ExpectEq("0", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 0. + equalsTestCase{0, true, false, ""}, + equalsTestCase{0.0, true, false, ""}, + equalsTestCase{0 + 0i, true, false, ""}, + equalsTestCase{int(0), true, false, ""}, + equalsTestCase{int8(0), true, false, ""}, + equalsTestCase{int16(0), true, false, ""}, + equalsTestCase{int32(0), true, false, ""}, + equalsTestCase{int64(0), true, false, ""}, + equalsTestCase{float32(0), true, false, ""}, + equalsTestCase{float64(0), true, false, ""}, + equalsTestCase{complex64(0), true, false, ""}, + equalsTestCase{complex128(0), true, false, ""}, + equalsTestCase{interface{}(int(0)), true, false, ""}, + equalsTestCase{uint(0), true, false, ""}, + equalsTestCase{uint8(0), true, false, ""}, + equalsTestCase{uint16(0), true, false, ""}, + equalsTestCase{uint32(0), true, false, ""}, + equalsTestCase{uint64(0), true, false, ""}, + equalsTestCase{uintptr(0), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(1), false, false, ""}, + equalsTestCase{int8(1), false, false, ""}, + equalsTestCase{int16(1), false, false, ""}, + equalsTestCase{int32(1), false, false, ""}, + equalsTestCase{int64(1), false, false, ""}, + equalsTestCase{float32(-0.1), false, false, ""}, + equalsTestCase{float32(0.1), false, false, ""}, + equalsTestCase{complex64(1), false, false, ""}, + equalsTestCase{complex64(0 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{0}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{0}, false, true, "which is not numeric"}, + equalsTestCase{"0", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveInt16() { + matcher := Equals(int16(32765)) + ExpectEq("32765", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 32765. 
+ equalsTestCase{32765, true, false, ""}, + equalsTestCase{32765.0, true, false, ""}, + equalsTestCase{32765 + 0i, true, false, ""}, + equalsTestCase{int(32765), true, false, ""}, + equalsTestCase{int16(32765), true, false, ""}, + equalsTestCase{int32(32765), true, false, ""}, + equalsTestCase{int64(32765), true, false, ""}, + equalsTestCase{float32(32765), true, false, ""}, + equalsTestCase{float64(32765), true, false, ""}, + equalsTestCase{complex64(32765), true, false, ""}, + equalsTestCase{complex128(32765), true, false, ""}, + equalsTestCase{interface{}(int(32765)), true, false, ""}, + equalsTestCase{uint(32765), true, false, ""}, + equalsTestCase{uint16(32765), true, false, ""}, + equalsTestCase{uint32(32765), true, false, ""}, + equalsTestCase{uint64(32765), true, false, ""}, + equalsTestCase{uintptr(32765), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(32764), false, false, ""}, + equalsTestCase{int16(32764), false, false, ""}, + equalsTestCase{int32(32764), false, false, ""}, + equalsTestCase{int64(32764), false, false, ""}, + equalsTestCase{float32(32764.9), false, false, ""}, + equalsTestCase{float32(32765.1), false, false, ""}, + equalsTestCase{complex64(32765.9), false, false, ""}, + equalsTestCase{complex64(32765 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{32765}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{32765}, false, true, "which is not numeric"}, + equalsTestCase{"32765", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// int32 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeInt32() { + // -2^30 + matcher := Equals(int32(-1073741824)) + ExpectEq("-1073741824", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -1073741824. + equalsTestCase{-1073741824, true, false, ""}, + equalsTestCase{-1073741824.0, true, false, ""}, + equalsTestCase{-1073741824 + 0i, true, false, ""}, + equalsTestCase{int(-1073741824), true, false, ""}, + equalsTestCase{int32(-1073741824), true, false, ""}, + equalsTestCase{int64(-1073741824), true, false, ""}, + equalsTestCase{float32(-1073741824), true, false, ""}, + equalsTestCase{float64(-1073741824), true, false, ""}, + equalsTestCase{complex64(-1073741824), true, false, ""}, + equalsTestCase{complex128(-1073741824), true, false, ""}, + equalsTestCase{interface{}(int(-1073741824)), true, false, ""}, + + // Values that would be -1073741824 in two's complement. + equalsTestCase{uint((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint32((1 << 32) - 1073741824), false, false, ""}, + equalsTestCase{uint64((1 << 64) - 1073741824), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 1073741824), false, false, ""}, + + // Non-equal values of signed integer type. 
+ equalsTestCase{int(-1073741823), false, false, ""}, + equalsTestCase{int32(-1073741823), false, false, ""}, + equalsTestCase{int64(-1073741823), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float64(-1073741824.1), false, false, ""}, + equalsTestCase{float64(-1073741823.9), false, false, ""}, + equalsTestCase{complex128(-1073741823), false, false, ""}, + equalsTestCase{complex128(-1073741824 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveInt32() { + // 2^30 + matcher := Equals(int32(1073741824)) + ExpectEq("1073741824", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 1073741824. + equalsTestCase{1073741824, true, false, ""}, + equalsTestCase{1073741824.0, true, false, ""}, + equalsTestCase{1073741824 + 0i, true, false, ""}, + equalsTestCase{int(1073741824), true, false, ""}, + equalsTestCase{uint(1073741824), true, false, ""}, + equalsTestCase{int32(1073741824), true, false, ""}, + equalsTestCase{int64(1073741824), true, false, ""}, + equalsTestCase{uint32(1073741824), true, false, ""}, + equalsTestCase{uint64(1073741824), true, false, ""}, + equalsTestCase{uintptr(1073741824), true, false, ""}, + equalsTestCase{float32(1073741824), true, false, ""}, + equalsTestCase{float64(1073741824), true, false, ""}, + equalsTestCase{complex64(1073741824), true, false, ""}, + equalsTestCase{complex128(1073741824), true, false, ""}, + equalsTestCase{interface{}(int(1073741824)), true, false, ""}, + equalsTestCase{interface{}(uint(1073741824)), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(1073741823), false, false, ""}, + equalsTestCase{int32(1073741823), false, false, ""}, + equalsTestCase{int64(1073741823), false, false, ""}, + equalsTestCase{float64(1073741824.1), false, false, ""}, + equalsTestCase{float64(1073741823.9), false, false, ""}, + equalsTestCase{complex128(1073741823), false, false, ""}, + equalsTestCase{complex128(1073741824 + 2i), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// int64 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeInt64() { + // -2^40 + matcher := Equals(int64(-1099511627776)) + ExpectEq("-1099511627776", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -1099511627776. + equalsTestCase{-1099511627776.0, true, false, ""}, + equalsTestCase{-1099511627776 + 0i, true, false, ""}, + equalsTestCase{int64(-1099511627776), true, false, ""}, + equalsTestCase{float32(-1099511627776), true, false, ""}, + equalsTestCase{float64(-1099511627776), true, false, ""}, + equalsTestCase{complex64(-1099511627776), true, false, ""}, + equalsTestCase{complex128(-1099511627776), true, false, ""}, + equalsTestCase{interface{}(int64(-1099511627776)), true, false, ""}, + + // Values that would be -1099511627776 in two's complement. + equalsTestCase{uint64((1 << 64) - 1099511627776), false, false, ""}, + + // Non-equal values of signed integer type. + equalsTestCase{int64(-1099511627775), false, false, ""}, + + // Non-equal values of other numeric types. + equalsTestCase{float64(-1099511627776.1), false, false, ""}, + equalsTestCase{float64(-1099511627775.9), false, false, ""}, + equalsTestCase{complex128(-1099511627775), false, false, ""}, + equalsTestCase{complex128(-1099511627776 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveInt64() { + // 2^40 + matcher := Equals(int64(1099511627776)) + ExpectEq("1099511627776", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 1099511627776. 
+ equalsTestCase{1099511627776.0, true, false, ""}, + equalsTestCase{1099511627776 + 0i, true, false, ""}, + equalsTestCase{int64(1099511627776), true, false, ""}, + equalsTestCase{uint64(1099511627776), true, false, ""}, + equalsTestCase{uintptr(1099511627776), true, false, ""}, + equalsTestCase{float32(1099511627776), true, false, ""}, + equalsTestCase{float64(1099511627776), true, false, ""}, + equalsTestCase{complex64(1099511627776), true, false, ""}, + equalsTestCase{complex128(1099511627776), true, false, ""}, + equalsTestCase{interface{}(int64(1099511627776)), true, false, ""}, + equalsTestCase{interface{}(uint64(1099511627776)), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(1099511627775), false, false, ""}, + equalsTestCase{uint64(1099511627775), false, false, ""}, + equalsTestCase{float64(1099511627776.1), false, false, ""}, + equalsTestCase{float64(1099511627775.9), false, false, ""}, + equalsTestCase{complex128(1099511627775), false, false, ""}, + equalsTestCase{complex128(1099511627776 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Int64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := Equals(int64(kTwoTo25 + 1)) + ExpectEq("33554433", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{int64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Single-precision floating point. + equalsTestCase{float32(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float32(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""}, + + // Double-precision floating point. 
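+ // (Unlike float32, float64 carries a 53-bit significand, so 2^25 + 1 is
+ // exactly representable at double precision and only the exact value
+ // should match.)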
+ equalsTestCase{float64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{float64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Int64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := Equals(int64(kTwoTo54 + 1)) + ExpectEq("18014398509481985", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo54 + 0), false, false, ""}, + equalsTestCase{int64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 2), false, false, ""}, + + equalsTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{float64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 3), false, false, ""}, + + equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// uint +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) SmallUint() { + const kExpected = 17 + matcher := Equals(uint(kExpected)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int8(kExpected), true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint8(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. 
+ equalsTestCase{kExpected + 1, false, false, ""}, + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int8(kExpected + 1), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeUint() { + const kExpected = (1 << 16) + 17 + matcher := Equals(uint(kExpected)) + ExpectEq("65553", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{65553, true, false, ""}, + equalsTestCase{65553.0, true, false, ""}, + equalsTestCase{65553 + 0i, true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. + equalsTestCase{int16(17), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(17), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) UintNotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. 
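+ // (Concretely, float32 has a 24-bit significand: 33554431 (2^25-1),
+ // 33554433 (2^25+1), and 33554434 (2^25+2) all round to float32(33554432).)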
+ const kTwoTo25 = 1 << 25 + matcher := Equals(uint(kTwoTo25 + 1)) + ExpectEq("33554433", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{int64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Single-precision floating point. + equalsTestCase{float32(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float32(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{float64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// uint8 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) SmallUint8() { + const kExpected = 17 + matcher := Equals(uint8(kExpected)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int8(kExpected), true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint8(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. 
+ equalsTestCase{kExpected + 1, false, false, ""}, + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int8(kExpected + 1), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// uint16 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) SmallUint16() { + const kExpected = 17 + matcher := Equals(uint16(kExpected)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int8(kExpected), true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint8(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. 
+ equalsTestCase{kExpected + 1, false, false, ""}, + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int8(kExpected + 1), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeUint16() { + const kExpected = (1 << 8) + 17 + matcher := Equals(uint16(kExpected)) + ExpectEq("273", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{273, true, false, ""}, + equalsTestCase{273.0, true, false, ""}, + equalsTestCase{273 + 0i, true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. 
+ equalsTestCase{int8(17), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(17), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// uint32 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) SmallUint32() { + const kExpected = 17 + matcher := Equals(uint32(kExpected)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int8(kExpected), true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint8(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. + equalsTestCase{kExpected + 1, false, false, ""}, + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int8(kExpected + 1), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeUint32() { + const kExpected = (1 << 16) + 17 + matcher := Equals(uint32(kExpected)) + ExpectEq("65553", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{65553, true, false, ""}, + equalsTestCase{65553.0, true, false, ""}, + equalsTestCase{65553 + 0i, true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. + equalsTestCase{int16(17), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(17), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Uint32NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := Equals(uint32(kTwoTo25 + 1)) + ExpectEq("33554433", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{int64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Single-precision floating point. 
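+		// float32 carries a 24-bit significand, so in [2^25, 2^26) adjacent
+		// representable values are 4 apart. With round-half-to-even, the four
+		// integers 2^25-1 through 2^25+2 all convert to exactly 2^25, which is
+		// why those float32/complex64 candidates match below while their
+		// neighbors are rejected.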
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float32(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{float64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// uint64 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) SmallUint64() { + const kExpected = 17 + matcher := Equals(uint64(kExpected)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int8(kExpected), true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint8(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. 
+ equalsTestCase{kExpected + 1, false, false, ""}, + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int8(kExpected + 1), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeUint64() { + const kExpected = (1 << 32) + 17 + matcher := Equals(uint64(kExpected)) + ExpectEq("4294967313", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{4294967313.0, true, false, ""}, + equalsTestCase{4294967313 + 0i, true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. + equalsTestCase{int(17), false, false, ""}, + equalsTestCase{int32(17), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(17), false, false, ""}, + equalsTestCase{uint32(17), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Uint64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := Equals(uint64(kTwoTo25 + 1)) + ExpectEq("33554433", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. 
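+		// The expected value is held as a uint64, and 2^25+1 is exactly
+		// representable there, so integer candidates must match it exactly;
+		// only the floating-point cases further down are subject to
+		// single-precision rounding.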
+ equalsTestCase{int64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{int64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Single-precision floating point. + equalsTestCase{float32(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float32(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{float64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 2), false, false, ""}, + + equalsTestCase{complex128(kTwoTo25 + 0), false, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 2), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Uint64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := Equals(uint64(kTwoTo54 + 1)) + ExpectEq("18014398509481985", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo54 + 0), false, false, ""}, + equalsTestCase{int64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 2), false, false, ""}, + + equalsTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{float64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 3), false, false, ""}, + + equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// uintptr +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) SmallUintptr() { + const kExpected = 17 + matcher := Equals(uintptr(kExpected)) + ExpectEq("17", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. 
+ equalsTestCase{17, true, false, ""}, + equalsTestCase{17.0, true, false, ""}, + equalsTestCase{17 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int8(kExpected), true, false, ""}, + equalsTestCase{int16(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint8(kExpected), true, false, ""}, + equalsTestCase{uint16(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. + equalsTestCase{kExpected + 1, false, false, ""}, + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int8(kExpected + 1), false, false, ""}, + equalsTestCase{int16(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint8(kExpected + 1), false, false, ""}, + equalsTestCase{uint16(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeUintptr() { + const kExpected = (1 << 32) + 17 + matcher := Equals(uintptr(kExpected)) + ExpectEq("4294967313", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{4294967313.0, true, false, ""}, + equalsTestCase{4294967313 + 0i, true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric types. 
+ equalsTestCase{int(17), false, false, ""}, + equalsTestCase{int32(17), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(17), false, false, ""}, + equalsTestCase{uint32(17), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected + 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 1), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// float32 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeIntegralFloat32() { + matcher := Equals(float32(-32769)) + ExpectEq("-32769", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -32769. + equalsTestCase{-32769.0, true, false, ""}, + equalsTestCase{-32769 + 0i, true, false, ""}, + equalsTestCase{int32(-32769), true, false, ""}, + equalsTestCase{int64(-32769), true, false, ""}, + equalsTestCase{float32(-32769), true, false, ""}, + equalsTestCase{float64(-32769), true, false, ""}, + equalsTestCase{complex64(-32769), true, false, ""}, + equalsTestCase{complex128(-32769), true, false, ""}, + equalsTestCase{interface{}(float32(-32769)), true, false, ""}, + equalsTestCase{interface{}(int64(-32769)), true, false, ""}, + + // Values that would be -32769 in two's complement. + equalsTestCase{uint64((1 << 64) - 32769), false, false, ""}, + equalsTestCase{uintptr((1 << 64) - 32769), false, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(-32770), false, false, ""}, + equalsTestCase{float32(-32769.1), false, false, ""}, + equalsTestCase{float32(-32768.9), false, false, ""}, + equalsTestCase{float64(-32769.1), false, false, ""}, + equalsTestCase{float64(-32768.9), false, false, ""}, + equalsTestCase{complex128(-32768), false, false, ""}, + equalsTestCase{complex128(-32769 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NegativeNonIntegralFloat32() { + matcher := Equals(float32(-32769.1)) + ExpectEq("-32769.1", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of -32769.1. + equalsTestCase{-32769.1, true, false, ""}, + equalsTestCase{-32769.1 + 0i, true, false, ""}, + equalsTestCase{float32(-32769.1), true, false, ""}, + equalsTestCase{float64(-32769.1), true, false, ""}, + equalsTestCase{complex64(-32769.1), true, false, ""}, + equalsTestCase{complex128(-32769.1), true, false, ""}, + + // Non-equal values of numeric type. 
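+		// Note that float64(-32769.1) above matched even though it is not
+		// bit-identical to float32(-32769.1): candidates are evidently
+		// compared at the expected value's single precision. The nearby
+		// values below convert to different float32 values and are rejected.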
+ equalsTestCase{int32(-32769), false, false, ""}, + equalsTestCase{int32(-32770), false, false, ""}, + equalsTestCase{int64(-32769), false, false, ""}, + equalsTestCase{int64(-32770), false, false, ""}, + equalsTestCase{float32(-32769.2), false, false, ""}, + equalsTestCase{float32(-32769.0), false, false, ""}, + equalsTestCase{float64(-32769.2), false, false, ""}, + equalsTestCase{complex128(-32769.1 + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeNegativeFloat32() { + const kExpected = -1 * (1 << 65) + matcher := Equals(float32(kExpected)) + ExpectEq("-3.689349e+19", matcher.Description()) + + floatExpected := float32(kExpected) + castedInt := int64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ZeroFloat32() { + matcher := Equals(float32(0)) + ExpectEq("0", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of zero. + equalsTestCase{0.0, true, false, ""}, + equalsTestCase{0 + 0i, true, false, ""}, + equalsTestCase{int(0), true, false, ""}, + equalsTestCase{int8(0), true, false, ""}, + equalsTestCase{int16(0), true, false, ""}, + equalsTestCase{int32(0), true, false, ""}, + equalsTestCase{int64(0), true, false, ""}, + equalsTestCase{uint(0), true, false, ""}, + equalsTestCase{uint8(0), true, false, ""}, + equalsTestCase{uint16(0), true, false, ""}, + equalsTestCase{uint32(0), true, false, ""}, + equalsTestCase{uint64(0), true, false, ""}, + equalsTestCase{uintptr(0), true, false, ""}, + equalsTestCase{float32(0), true, false, ""}, + equalsTestCase{float64(0), true, false, ""}, + equalsTestCase{complex64(0), true, false, ""}, + equalsTestCase{complex128(0), true, false, ""}, + equalsTestCase{interface{}(float32(0)), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(1), false, false, ""}, + equalsTestCase{int64(-1), false, false, ""}, + equalsTestCase{float32(1), false, false, ""}, + equalsTestCase{float32(-1), false, false, ""}, + equalsTestCase{complex128(0 + 2i), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveIntegralFloat32() { + matcher := Equals(float32(32769)) + ExpectEq("32769", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 32769. + equalsTestCase{32769.0, true, false, ""}, + equalsTestCase{32769 + 0i, true, false, ""}, + equalsTestCase{int(32769), true, false, ""}, + equalsTestCase{int32(32769), true, false, ""}, + equalsTestCase{int64(32769), true, false, ""}, + equalsTestCase{uint(32769), true, false, ""}, + equalsTestCase{uint32(32769), true, false, ""}, + equalsTestCase{uint64(32769), true, false, ""}, + equalsTestCase{uintptr(32769), true, false, ""}, + equalsTestCase{float32(32769), true, false, ""}, + equalsTestCase{float64(32769), true, false, ""}, + equalsTestCase{complex64(32769), true, false, ""}, + equalsTestCase{complex128(32769), true, false, ""}, + equalsTestCase{interface{}(float32(32769)), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(32770), false, false, ""}, + equalsTestCase{uint64(32770), false, false, ""}, + equalsTestCase{float32(32769.1), false, false, ""}, + equalsTestCase{float32(32768.9), false, false, ""}, + equalsTestCase{float64(32769.1), false, false, ""}, + equalsTestCase{float64(32768.9), false, false, ""}, + equalsTestCase{complex128(32768), false, false, ""}, + equalsTestCase{complex128(32769 + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveNonIntegralFloat32() { + matcher := Equals(float32(32769.1)) + ExpectEq("32769.1", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of 32769.1. + equalsTestCase{32769.1, true, false, ""}, + equalsTestCase{32769.1 + 0i, true, false, ""}, + equalsTestCase{float32(32769.1), true, false, ""}, + equalsTestCase{float64(32769.1), true, false, ""}, + equalsTestCase{complex64(32769.1), true, false, ""}, + equalsTestCase{complex128(32769.1), true, false, ""}, + + // Non-equal values of numeric type. 
+ equalsTestCase{int32(32769), false, false, ""}, + equalsTestCase{int32(32770), false, false, ""}, + equalsTestCase{uint64(32769), false, false, ""}, + equalsTestCase{uint64(32770), false, false, ""}, + equalsTestCase{float32(32769.2), false, false, ""}, + equalsTestCase{float32(32769.0), false, false, ""}, + equalsTestCase{float64(32769.2), false, false, ""}, + equalsTestCase{complex128(32769.1 + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargePositiveFloat32() { + const kExpected = 1 << 65 + matcher := Equals(float32(kExpected)) + ExpectEq("3.689349e+19", matcher.Description()) + + floatExpected := float32(kExpected) + castedInt := uint64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{uint64(0), false, false, ""}, + equalsTestCase{uint64(math.MaxUint64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Float32AboveExactIntegerRange() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := Equals(float32(kTwoTo25 + 1)) + ExpectEq("3.3554432e+07", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{int64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{uint64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{uint64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 3), false, false, ""}, + + // Single-precision floating point. 
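+		// Here the expected value itself is a float32, so 2^25+1 already
+		// collapsed to 2^25 when the matcher was built (hence the
+		// "3.3554432e+07" description), and the integer candidates above
+		// match over the whole range [2^25-1, 2^25+2].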
+ equalsTestCase{float32(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float32(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex128(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex128(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// float64 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeIntegralFloat64() { + const kExpected = -(1 << 50) + matcher := Equals(float64(kExpected)) + ExpectEq("-1.125899906842624e+15", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{-1125899906842624.0, true, false, ""}, + equalsTestCase{-1125899906842624.0 + 0i, true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + equalsTestCase{interface{}(float64(kExpected)), true, false, ""}, + + // Values that would be kExpected in two's complement. + equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.5), false, false, ""}, + equalsTestCase{float64(kExpected + 0.5), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NegativeNonIntegralFloat64() { + const kTwoTo50 = 1 << 50 + const kExpected = -kTwoTo50 - 0.25 + + matcher := Equals(float64(kExpected)) + ExpectEq("-1.1258999068426242e+15", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(-kTwoTo50), false, false, ""}, + equalsTestCase{int64(-kTwoTo50 - 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.25), false, false, ""}, + equalsTestCase{float64(kExpected + 0.25), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeNegativeFloat64() { + const kExpected = -1 * (1 << 65) + matcher := Equals(float64(kExpected)) + ExpectEq("-3.6893488147419103e+19", matcher.Description()) + + floatExpected := float64(kExpected) + castedInt := int64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ZeroFloat64() { + matcher := Equals(float64(0)) + ExpectEq("0", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of zero. 
+		equalsTestCase{0.0, true, false, ""},
+		equalsTestCase{0 + 0i, true, false, ""},
+		equalsTestCase{int(0), true, false, ""},
+		equalsTestCase{int8(0), true, false, ""},
+		equalsTestCase{int16(0), true, false, ""},
+		equalsTestCase{int32(0), true, false, ""},
+		equalsTestCase{int64(0), true, false, ""},
+		equalsTestCase{uint(0), true, false, ""},
+		equalsTestCase{uint8(0), true, false, ""},
+		equalsTestCase{uint16(0), true, false, ""},
+		equalsTestCase{uint32(0), true, false, ""},
+		equalsTestCase{uint64(0), true, false, ""},
+		equalsTestCase{uintptr(0), true, false, ""},
+		equalsTestCase{float32(0), true, false, ""},
+		equalsTestCase{float64(0), true, false, ""},
+		equalsTestCase{complex64(0), true, false, ""},
+		equalsTestCase{complex128(0), true, false, ""},
+		equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+		// Non-equal values of numeric type.
+		equalsTestCase{int64(1), false, false, ""},
+		equalsTestCase{int64(-1), false, false, ""},
+		equalsTestCase{float32(1), false, false, ""},
+		equalsTestCase{float32(-1), false, false, ""},
+		equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+		// Non-numeric types.
+		equalsTestCase{true, false, true, "which is not numeric"},
+		equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+		equalsTestCase{make(chan int), false, true, "which is not numeric"},
+		equalsTestCase{func() {}, false, true, "which is not numeric"},
+		equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+		equalsTestCase{&someInt, false, true, "which is not numeric"},
+		equalsTestCase{[]int{}, false, true, "which is not numeric"},
+		equalsTestCase{"taco", false, true, "which is not numeric"},
+		equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+	}
+
+	t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralFloat64() {
+	const kExpected = 1 << 50
+	matcher := Equals(float64(kExpected))
+	ExpectEq("1.125899906842624e+15", matcher.Description())
+
+	cases := []equalsTestCase{
+		// Various types of the expected value.
+		equalsTestCase{1125899906842624.0, true, false, ""},
+		equalsTestCase{1125899906842624.0 + 0i, true, false, ""},
+		equalsTestCase{int64(kExpected), true, false, ""},
+		equalsTestCase{uint64(kExpected), true, false, ""},
+		equalsTestCase{uintptr(kExpected), true, false, ""},
+		equalsTestCase{float32(kExpected), true, false, ""},
+		equalsTestCase{float64(kExpected), true, false, ""},
+		equalsTestCase{complex64(kExpected), true, false, ""},
+		equalsTestCase{complex128(kExpected), true, false, ""},
+		equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+		// Non-equal values of numeric type.
+		equalsTestCase{int64(kExpected + 1), false, false, ""},
+		equalsTestCase{uint64(kExpected + 1), false, false, ""},
+		equalsTestCase{uintptr(kExpected + 1), false, false, ""},
+		equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""},
+		equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""},
+		equalsTestCase{float64(kExpected - 0.5), false, false, ""},
+		equalsTestCase{float64(kExpected + 0.5), false, false, ""},
+		equalsTestCase{complex128(kExpected - 1), false, false, ""},
+		equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+
+		// Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveNonIntegralFloat64() { + const kTwoTo50 = 1 << 50 + const kExpected = kTwoTo50 + 0.25 + matcher := Equals(float64(kExpected)) + ExpectEq("1.1258999068426242e+15", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(kTwoTo50), false, false, ""}, + equalsTestCase{int64(kTwoTo50 - 1), false, false, ""}, + equalsTestCase{float64(kExpected - 0.25), false, false, ""}, + equalsTestCase{float64(kExpected + 0.25), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargePositiveFloat64() { + const kExpected = 1 << 65 + matcher := Equals(float64(kExpected)) + ExpectEq("3.6893488147419103e+19", matcher.Description()) + + floatExpected := float64(kExpected) + castedInt := uint64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{uint64(0), false, false, ""}, + equalsTestCase{uint64(math.MaxUint64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Float64AboveExactIntegerRange() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := Equals(float64(kTwoTo54 + 1)) + ExpectEq("1.8014398509481984e+16", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. 
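+		// float64 has a 53-bit significand, so in [2^54, 2^55) adjacent
+		// representable values are 4 apart; round-half-to-even sends
+		// 2^54-1 through 2^54+2 all to exactly 2^54. That is why the
+		// matcher above describes itself as 1.8014398509481984e+16 and
+		// the candidates below match over that whole range.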
+ equalsTestCase{int64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{int64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 3), false, false, ""}, + + equalsTestCase{uint64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 3), false, false, ""}, + + // Double-precision floating point. + equalsTestCase{float64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{float64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 3), false, false, ""}, + + equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// complex64 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeIntegralComplex64() { + const kExpected = -32769 + matcher := Equals(complex64(kExpected)) + ExpectEq("(-32769+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{-32769.0, true, false, ""}, + equalsTestCase{-32769.0 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + equalsTestCase{interface{}(float64(kExpected)), true, false, ""}, + + // Values that would be kExpected in two's complement. + equalsTestCase{uint32((1 << 32) + kExpected), false, false, ""}, + equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""}, + equalsTestCase{uintptr((1 << 64) + kExpected), false, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.5), false, false, ""}, + equalsTestCase{float64(kExpected + 0.5), false, false, ""}, + equalsTestCase{complex64(kExpected - 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + + // Non-numeric types. 
+ equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NegativeNonIntegralComplex64() { + const kTwoTo20 = 1 << 20 + const kExpected = -kTwoTo20 - 0.25 + + matcher := Equals(complex64(kExpected)) + ExpectEq("(-1.0485762e+06+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(-kTwoTo20), false, false, ""}, + equalsTestCase{int(-kTwoTo20 - 1), false, false, ""}, + equalsTestCase{int32(-kTwoTo20), false, false, ""}, + equalsTestCase{int32(-kTwoTo20 - 1), false, false, ""}, + equalsTestCase{int64(-kTwoTo20), false, false, ""}, + equalsTestCase{int64(-kTwoTo20 - 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.25), false, false, ""}, + equalsTestCase{float64(kExpected + 0.25), false, false, ""}, + equalsTestCase{complex64(kExpected - 0.75), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected - 0.75), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeNegativeComplex64() { + const kExpected = -1 * (1 << 65) + matcher := Equals(complex64(kExpected)) + ExpectEq("(-3.689349e+19+0i)", matcher.Description()) + + floatExpected := float64(kExpected) + castedInt := int64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ZeroComplex64() { + matcher := Equals(complex64(0)) + ExpectEq("(0+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of zero. 
+		equalsTestCase{0.0, true, false, ""},
+		equalsTestCase{0 + 0i, true, false, ""},
+		equalsTestCase{int(0), true, false, ""},
+		equalsTestCase{int8(0), true, false, ""},
+		equalsTestCase{int16(0), true, false, ""},
+		equalsTestCase{int32(0), true, false, ""},
+		equalsTestCase{int64(0), true, false, ""},
+		equalsTestCase{uint(0), true, false, ""},
+		equalsTestCase{uint8(0), true, false, ""},
+		equalsTestCase{uint16(0), true, false, ""},
+		equalsTestCase{uint32(0), true, false, ""},
+		equalsTestCase{uint64(0), true, false, ""},
+		equalsTestCase{uintptr(0), true, false, ""},
+		equalsTestCase{float32(0), true, false, ""},
+		equalsTestCase{float64(0), true, false, ""},
+		equalsTestCase{complex64(0), true, false, ""},
+		equalsTestCase{complex128(0), true, false, ""},
+		equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+		// Non-equal values of numeric type.
+		equalsTestCase{int64(1), false, false, ""},
+		equalsTestCase{int64(-1), false, false, ""},
+		equalsTestCase{float32(1), false, false, ""},
+		equalsTestCase{float32(-1), false, false, ""},
+		equalsTestCase{float64(1), false, false, ""},
+		equalsTestCase{float64(-1), false, false, ""},
+		equalsTestCase{complex64(0 + 2i), false, false, ""},
+		equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+		// Non-numeric types.
+		equalsTestCase{true, false, true, "which is not numeric"},
+		equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+		equalsTestCase{make(chan int), false, true, "which is not numeric"},
+		equalsTestCase{func() {}, false, true, "which is not numeric"},
+		equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+		equalsTestCase{&someInt, false, true, "which is not numeric"},
+		equalsTestCase{[]int{}, false, true, "which is not numeric"},
+		equalsTestCase{"taco", false, true, "which is not numeric"},
+		equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+	}
+
+	t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralComplex64() {
+	const kExpected = 1 << 20
+	matcher := Equals(complex64(kExpected))
+	ExpectEq("(1.048576e+06+0i)", matcher.Description())
+
+	cases := []equalsTestCase{
+		// Various types of the expected value.
+		equalsTestCase{1048576.0, true, false, ""},
+		equalsTestCase{1048576.0 + 0i, true, false, ""},
+		equalsTestCase{int(kExpected), true, false, ""},
+		equalsTestCase{int32(kExpected), true, false, ""},
+		equalsTestCase{int64(kExpected), true, false, ""},
+		equalsTestCase{uint(kExpected), true, false, ""},
+		equalsTestCase{uint32(kExpected), true, false, ""},
+		equalsTestCase{uint64(kExpected), true, false, ""},
+		equalsTestCase{uintptr(kExpected), true, false, ""},
+		equalsTestCase{float32(kExpected), true, false, ""},
+		equalsTestCase{float64(kExpected), true, false, ""},
+		equalsTestCase{complex64(kExpected), true, false, ""},
+		equalsTestCase{complex128(kExpected), true, false, ""},
+		equalsTestCase{interface{}(float64(kExpected)), true, false, ""},
+
+		// Non-equal values of numeric type.
+ equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.5), false, false, ""}, + equalsTestCase{float64(kExpected + 0.5), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveNonIntegralComplex64() { + const kTwoTo20 = 1 << 20 + const kExpected = kTwoTo20 + 0.25 + matcher := Equals(complex64(kExpected)) + ExpectEq("(1.0485762e+06+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(kTwoTo20), false, false, ""}, + equalsTestCase{int64(kTwoTo20 - 1), false, false, ""}, + equalsTestCase{uint64(kTwoTo20), false, false, ""}, + equalsTestCase{uint64(kTwoTo20 - 1), false, false, ""}, + equalsTestCase{float32(kExpected - 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected - 0.25), false, false, ""}, + equalsTestCase{float64(kExpected + 0.25), false, false, ""}, + equalsTestCase{complex64(kExpected - 1), false, false, ""}, + equalsTestCase{complex64(kExpected - 1i), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected - 1i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargePositiveComplex64() { + const kExpected = 1 << 65 + matcher := Equals(complex64(kExpected)) + ExpectEq("(3.689349e+19+0i)", matcher.Description()) + + floatExpected := float64(kExpected) + castedInt := uint64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. 
+ equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{uint64(0), false, false, ""}, + equalsTestCase{uint64(math.MaxUint64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Complex64AboveExactIntegerRange() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := Equals(complex64(kTwoTo25 + 1)) + ExpectEq("(3.3554432e+07+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{int64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{int64(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{uint64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{uint64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{uint64(kTwoTo25 + 3), false, false, ""}, + + // Single-precision floating point. + equalsTestCase{float32(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float32(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex64(kTwoTo25 + 3), false, false, ""}, + + // Double-precision floating point. 
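+ // The expected value is a complex64, so a double-precision candidate
+ // matches iff it collapses to the same float32. Even float64(kTwoTo25+2),
+ // which float64 represents exactly, matches: at 2^25 a float32 ulp is 4
+ // (24-bit significand), and 33554431..33554434 all round to 33554432.
+ // For instance: float32(1<<25+2) == float32(1<<25) holds.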
+ equalsTestCase{float64(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{float64(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{float64(kTwoTo25 + 3), false, false, ""}, + + equalsTestCase{complex128(kTwoTo25 - 2), false, false, ""}, + equalsTestCase{complex128(kTwoTo25 - 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 0), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 2), true, false, ""}, + equalsTestCase{complex128(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Complex64WithNonZeroImaginaryPart() { + const kRealPart = 17 + const kImagPart = 0.25i + const kExpected = kRealPart + kImagPart + matcher := Equals(complex64(kExpected)) + ExpectEq("(17+0.25i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kRealPart + kImagPart, true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(kRealPart), false, false, ""}, + equalsTestCase{int8(kRealPart), false, false, ""}, + equalsTestCase{int16(kRealPart), false, false, ""}, + equalsTestCase{int32(kRealPart), false, false, ""}, + equalsTestCase{int64(kRealPart), false, false, ""}, + equalsTestCase{uint(kRealPart), false, false, ""}, + equalsTestCase{uint8(kRealPart), false, false, ""}, + equalsTestCase{uint16(kRealPart), false, false, ""}, + equalsTestCase{uint32(kRealPart), false, false, ""}, + equalsTestCase{uint64(kRealPart), false, false, ""}, + equalsTestCase{float32(kRealPart), false, false, ""}, + equalsTestCase{float64(kRealPart), false, false, ""}, + equalsTestCase{complex64(kRealPart), false, false, ""}, + equalsTestCase{complex64(kRealPart + kImagPart + 0.5), false, false, ""}, + equalsTestCase{complex64(kRealPart + kImagPart + 0.5i), false, false, ""}, + equalsTestCase{complex128(kRealPart), false, false, ""}, + equalsTestCase{complex128(kRealPart + kImagPart + 0.5), false, false, ""}, + equalsTestCase{complex128(kRealPart + kImagPart + 0.5i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// complex128 +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NegativeIntegralComplex128() { + const kExpected = -32769 + matcher := Equals(complex128(kExpected)) + ExpectEq("(-32769+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{-32769.0, true, false, ""}, + equalsTestCase{-32769.0 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + equalsTestCase{interface{}(float64(kExpected)), true, false, ""}, + + // Values that would be kExpected in two's complement. 
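+ // e.g. (1 << 32) + kExpected = 4294934527 = 0xffff7fff, the uint32 bit
+ // pattern of int32(-32769). Equals compares numeric values, not bit
+ // patterns, so these must not match.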
+ equalsTestCase{uint32((1 << 32) + kExpected), false, false, ""}, + equalsTestCase{uint64((1 << 64) + kExpected), false, false, ""}, + equalsTestCase{uintptr((1 << 64) + kExpected), false, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.5), false, false, ""}, + equalsTestCase{float64(kExpected + 0.5), false, false, ""}, + equalsTestCase{complex64(kExpected - 1), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NegativeNonIntegralComplex128() { + const kTwoTo20 = 1 << 20 + const kExpected = -kTwoTo20 - 0.25 + + matcher := Equals(complex128(kExpected)) + ExpectEq("(-1.04857625e+06+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(-kTwoTo20), false, false, ""}, + equalsTestCase{int(-kTwoTo20 - 1), false, false, ""}, + equalsTestCase{int32(-kTwoTo20), false, false, ""}, + equalsTestCase{int32(-kTwoTo20 - 1), false, false, ""}, + equalsTestCase{int64(-kTwoTo20), false, false, ""}, + equalsTestCase{int64(-kTwoTo20 - 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.25), false, false, ""}, + equalsTestCase{float64(kExpected + 0.25), false, false, ""}, + equalsTestCase{complex64(kExpected - 0.75), false, false, ""}, + equalsTestCase{complex64(kExpected + 2i), false, false, ""}, + equalsTestCase{complex128(kExpected - 0.75), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargeNegativeComplex128() { + const kExpected = -1 * (1 << 65) + matcher := Equals(complex128(kExpected)) + ExpectEq("(-3.6893488147419103e+19+0i)", matcher.Description()) + + floatExpected := float64(kExpected) + castedInt := int64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. 
+ equalsTestCase{kExpected + 0i, true, false, ""},
+ equalsTestCase{float32(kExpected), true, false, ""},
+ equalsTestCase{float64(kExpected), true, false, ""},
+ equalsTestCase{complex64(kExpected), true, false, ""},
+ equalsTestCase{complex128(kExpected), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{castedInt, false, false, ""},
+ equalsTestCase{int64(0), false, false, ""},
+ equalsTestCase{int64(math.MinInt64), false, false, ""},
+ equalsTestCase{int64(math.MaxInt64), false, false, ""},
+ equalsTestCase{float32(kExpected / 2), false, false, ""},
+ equalsTestCase{float64(kExpected / 2), false, false, ""},
+ equalsTestCase{complex64(kExpected + 2i), false, false, ""},
+ equalsTestCase{complex128(kExpected + 2i), false, false, ""},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) ZeroComplex128() {
+ matcher := Equals(complex128(0))
+ ExpectEq("(0+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of zero.
+ equalsTestCase{0.0, true, false, ""},
+ equalsTestCase{0 + 0i, true, false, ""},
+ equalsTestCase{int(0), true, false, ""},
+ equalsTestCase{int8(0), true, false, ""},
+ equalsTestCase{int16(0), true, false, ""},
+ equalsTestCase{int32(0), true, false, ""},
+ equalsTestCase{int64(0), true, false, ""},
+ equalsTestCase{uint(0), true, false, ""},
+ equalsTestCase{uint8(0), true, false, ""},
+ equalsTestCase{uint16(0), true, false, ""},
+ equalsTestCase{uint32(0), true, false, ""},
+ equalsTestCase{uint64(0), true, false, ""},
+ equalsTestCase{uintptr(0), true, false, ""},
+ equalsTestCase{float32(0), true, false, ""},
+ equalsTestCase{float64(0), true, false, ""},
+ equalsTestCase{complex64(0), true, false, ""},
+ equalsTestCase{complex128(0), true, false, ""},
+ equalsTestCase{interface{}(float32(0)), true, false, ""},
+
+ // Non-equal values of numeric type.
+ equalsTestCase{int64(1), false, false, ""},
+ equalsTestCase{int64(-1), false, false, ""},
+ equalsTestCase{float32(1), false, false, ""},
+ equalsTestCase{float32(-1), false, false, ""},
+ equalsTestCase{float64(1), false, false, ""},
+ equalsTestCase{float64(-1), false, false, ""},
+ equalsTestCase{complex64(0 + 2i), false, false, ""},
+ equalsTestCase{complex128(0 + 2i), false, false, ""},
+
+ // Non-numeric types.
+ equalsTestCase{true, false, true, "which is not numeric"},
+ equalsTestCase{[...]int{}, false, true, "which is not numeric"},
+ equalsTestCase{make(chan int), false, true, "which is not numeric"},
+ equalsTestCase{func() {}, false, true, "which is not numeric"},
+ equalsTestCase{map[int]int{}, false, true, "which is not numeric"},
+ equalsTestCase{&someInt, false, true, "which is not numeric"},
+ equalsTestCase{[]int{}, false, true, "which is not numeric"},
+ equalsTestCase{"taco", false, true, "which is not numeric"},
+ equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"},
+ }
+
+ t.checkTestCases(matcher, cases)
+}
+
+func (t *EqualsTest) PositiveIntegralComplex128() {
+ const kExpected = 1 << 20
+ matcher := Equals(complex128(kExpected))
+ ExpectEq("(1.048576e+06+0i)", matcher.Description())
+
+ cases := []equalsTestCase{
+ // Various types of 1048576.
+ equalsTestCase{1048576.0, true, false, ""}, + equalsTestCase{1048576.0 + 0i, true, false, ""}, + equalsTestCase{int(kExpected), true, false, ""}, + equalsTestCase{int32(kExpected), true, false, ""}, + equalsTestCase{int64(kExpected), true, false, ""}, + equalsTestCase{uint(kExpected), true, false, ""}, + equalsTestCase{uint32(kExpected), true, false, ""}, + equalsTestCase{uint64(kExpected), true, false, ""}, + equalsTestCase{uintptr(kExpected), true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + equalsTestCase{interface{}(float64(kExpected)), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(kExpected + 1), false, false, ""}, + equalsTestCase{int32(kExpected + 1), false, false, ""}, + equalsTestCase{int64(kExpected + 1), false, false, ""}, + equalsTestCase{uint(kExpected + 1), false, false, ""}, + equalsTestCase{uint32(kExpected + 1), false, false, ""}, + equalsTestCase{uint64(kExpected + 1), false, false, ""}, + equalsTestCase{uintptr(kExpected + 1), false, false, ""}, + equalsTestCase{float32(kExpected - (1 << 30)), false, false, ""}, + equalsTestCase{float32(kExpected + (1 << 30)), false, false, ""}, + equalsTestCase{float64(kExpected - 0.5), false, false, ""}, + equalsTestCase{float64(kExpected + 0.5), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + + // Non-numeric types. + equalsTestCase{true, false, true, "which is not numeric"}, + equalsTestCase{[...]int{}, false, true, "which is not numeric"}, + equalsTestCase{make(chan int), false, true, "which is not numeric"}, + equalsTestCase{func() {}, false, true, "which is not numeric"}, + equalsTestCase{map[int]int{}, false, true, "which is not numeric"}, + equalsTestCase{&someInt, false, true, "which is not numeric"}, + equalsTestCase{[]int{}, false, true, "which is not numeric"}, + equalsTestCase{"taco", false, true, "which is not numeric"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not numeric"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) PositiveNonIntegralComplex128() { + const kTwoTo20 = 1 << 20 + const kExpected = kTwoTo20 + 0.25 + matcher := Equals(complex128(kExpected)) + ExpectEq("(1.04857625e+06+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. 
+ equalsTestCase{int64(kTwoTo20), false, false, ""}, + equalsTestCase{int64(kTwoTo20 - 1), false, false, ""}, + equalsTestCase{uint64(kTwoTo20), false, false, ""}, + equalsTestCase{uint64(kTwoTo20 - 1), false, false, ""}, + equalsTestCase{float32(kExpected - 1), false, false, ""}, + equalsTestCase{float32(kExpected + 1), false, false, ""}, + equalsTestCase{float64(kExpected - 0.25), false, false, ""}, + equalsTestCase{float64(kExpected + 0.25), false, false, ""}, + equalsTestCase{complex64(kExpected - 1), false, false, ""}, + equalsTestCase{complex64(kExpected - 1i), false, false, ""}, + equalsTestCase{complex128(kExpected - 1), false, false, ""}, + equalsTestCase{complex128(kExpected - 1i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) LargePositiveComplex128() { + const kExpected = 1 << 65 + matcher := Equals(complex128(kExpected)) + ExpectEq("(3.6893488147419103e+19+0i)", matcher.Description()) + + floatExpected := float64(kExpected) + castedInt := uint64(floatExpected) + + cases := []equalsTestCase{ + // Equal values of numeric type. + equalsTestCase{kExpected + 0i, true, false, ""}, + equalsTestCase{float32(kExpected), true, false, ""}, + equalsTestCase{float64(kExpected), true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{castedInt, false, false, ""}, + equalsTestCase{int64(0), false, false, ""}, + equalsTestCase{int64(math.MinInt64), false, false, ""}, + equalsTestCase{int64(math.MaxInt64), false, false, ""}, + equalsTestCase{uint64(0), false, false, ""}, + equalsTestCase{uint64(math.MaxUint64), false, false, ""}, + equalsTestCase{float32(kExpected / 2), false, false, ""}, + equalsTestCase{float64(kExpected / 2), false, false, ""}, + equalsTestCase{complex128(kExpected + 2i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Complex128AboveExactIntegerRange() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := Equals(complex128(kTwoTo54 + 1)) + ExpectEq("(1.8014398509481984e+16+0i)", matcher.Description()) + + cases := []equalsTestCase{ + // Integers. + equalsTestCase{int64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{int64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{int64(kTwoTo54 + 3), false, false, ""}, + + equalsTestCase{uint64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{uint64(kTwoTo54 + 3), false, false, ""}, + + // Double-precision floating point. 
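+ // At this magnitude a float64 ulp is 4 (53-bit significand): below 2^54
+ // the spacing is 2 and above it is 4, so 2^54-1, 2^54+1 and 2^54+2 all
+ // round (ties-to-even) to exactly 2^54, while 2^54-2 and 2^54+3 have
+ // other nearest representables. That is why the same window matches below.
+ // For instance: float64(1<<54+2) == float64(1<<54) holds.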
+ equalsTestCase{float64(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{float64(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{float64(kTwoTo54 + 3), false, false, ""}, + + equalsTestCase{complex128(kTwoTo54 - 2), false, false, ""}, + equalsTestCase{complex128(kTwoTo54 - 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 0), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 1), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 2), true, false, ""}, + equalsTestCase{complex128(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) Complex128WithNonZeroImaginaryPart() { + const kRealPart = 17 + const kImagPart = 0.25i + const kExpected = kRealPart + kImagPart + matcher := Equals(complex128(kExpected)) + ExpectEq("(17+0.25i)", matcher.Description()) + + cases := []equalsTestCase{ + // Various types of the expected value. + equalsTestCase{kExpected, true, false, ""}, + equalsTestCase{kRealPart + kImagPart, true, false, ""}, + equalsTestCase{complex64(kExpected), true, false, ""}, + equalsTestCase{complex128(kExpected), true, false, ""}, + + // Non-equal values of numeric type. + equalsTestCase{int(kRealPart), false, false, ""}, + equalsTestCase{int8(kRealPart), false, false, ""}, + equalsTestCase{int16(kRealPart), false, false, ""}, + equalsTestCase{int32(kRealPart), false, false, ""}, + equalsTestCase{int64(kRealPart), false, false, ""}, + equalsTestCase{uint(kRealPart), false, false, ""}, + equalsTestCase{uint8(kRealPart), false, false, ""}, + equalsTestCase{uint16(kRealPart), false, false, ""}, + equalsTestCase{uint32(kRealPart), false, false, ""}, + equalsTestCase{uint64(kRealPart), false, false, ""}, + equalsTestCase{float32(kRealPart), false, false, ""}, + equalsTestCase{float64(kRealPart), false, false, ""}, + equalsTestCase{complex64(kRealPart), false, false, ""}, + equalsTestCase{complex64(kRealPart + kImagPart + 0.5), false, false, ""}, + equalsTestCase{complex64(kRealPart + kImagPart + 0.5i), false, false, ""}, + equalsTestCase{complex128(kRealPart), false, false, ""}, + equalsTestCase{complex128(kRealPart + kImagPart + 0.5), false, false, ""}, + equalsTestCase{complex128(kRealPart + kImagPart + 0.5i), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Arrays +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) ArrayOfComparableType() { + expected := [3]uint{17, 19, 23} + + matcher := Equals(expected) + ExpectEq("[17 19 23]", matcher.Description()) + + // To defeat constant de-duping by the compiler. + makeArray := func(i, j, k uint) [3]uint { return [3]uint{i, j, k} } + + type arrayAlias [3]uint + type uintAlias uint + + cases := []equalsTestCase{ + // Correct types, equal. + equalsTestCase{expected, true, false, ""}, + equalsTestCase{[3]uint{17, 19, 23}, true, false, ""}, + equalsTestCase{makeArray(17, 19, 23), true, false, ""}, + + // Correct types, not equal. + equalsTestCase{[3]uint{0, 0, 0}, false, false, ""}, + equalsTestCase{[3]uint{18, 19, 23}, false, false, ""}, + equalsTestCase{[3]uint{17, 20, 23}, false, false, ""}, + equalsTestCase{[3]uint{17, 19, 22}, false, false, ""}, + + // Other types. 
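+ // Type identity, not representation, is what counts below: arrayAlias
+ // and [3]uintAlias lay out the same bytes as [3]uint{17, 19, 23}, yet
+ // both are distinct types and so draw the fatal type error.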
+ equalsTestCase{0, false, true, "which is not [3]uint"}, + equalsTestCase{bool(false), false, true, "which is not [3]uint"}, + equalsTestCase{int(0), false, true, "which is not [3]uint"}, + equalsTestCase{int8(0), false, true, "which is not [3]uint"}, + equalsTestCase{int16(0), false, true, "which is not [3]uint"}, + equalsTestCase{int32(0), false, true, "which is not [3]uint"}, + equalsTestCase{int64(0), false, true, "which is not [3]uint"}, + equalsTestCase{uint(0), false, true, "which is not [3]uint"}, + equalsTestCase{uint8(0), false, true, "which is not [3]uint"}, + equalsTestCase{uint16(0), false, true, "which is not [3]uint"}, + equalsTestCase{uint32(0), false, true, "which is not [3]uint"}, + equalsTestCase{uint64(0), false, true, "which is not [3]uint"}, + equalsTestCase{true, false, true, "which is not [3]uint"}, + equalsTestCase{[...]int{}, false, true, "which is not [3]uint"}, + equalsTestCase{func() {}, false, true, "which is not [3]uint"}, + equalsTestCase{map[int]int{}, false, true, "which is not [3]uint"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not [3]uint"}, + equalsTestCase{[2]uint{17, 19}, false, true, "which is not [3]uint"}, + equalsTestCase{[4]uint{17, 19, 23, 0}, false, true, "which is not [3]uint"}, + equalsTestCase{arrayAlias{17, 19, 23}, false, true, "which is not [3]uint"}, + equalsTestCase{[3]uintAlias{17, 19, 23}, false, true, "which is not [3]uint"}, + equalsTestCase{[3]int32{17, 19, 23}, false, true, "which is not [3]uint"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ArrayOfNonComparableType() { + type nonComparableArray [2]map[string]string + f := func() { + ExpectEq(nonComparableArray{}, nonComparableArray{}) + } + + ExpectThat(f, Panics(MatchesRegexp("uncomparable.*nonComparableArray"))) +} + +//////////////////////////////////////////////////////////////////////// +// chan +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NilChan() { + var nilChan1 chan int + var nilChan2 chan int + var nilChan3 chan uint + var nonNilChan1 chan int = make(chan int) + var nonNilChan2 chan uint = make(chan uint) + + matcher := Equals(nilChan1) + ExpectEq("", matcher.Description()) + + cases := []equalsTestCase{ + // int channels + equalsTestCase{nilChan1, true, false, ""}, + equalsTestCase{nilChan2, true, false, ""}, + equalsTestCase{nonNilChan1, false, false, ""}, + + // uint channels + equalsTestCase{nilChan3, false, true, "which is not a chan int"}, + equalsTestCase{nonNilChan2, false, true, "which is not a chan int"}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a chan int"}, + equalsTestCase{bool(false), false, true, "which is not a chan int"}, + equalsTestCase{int(0), false, true, "which is not a chan int"}, + equalsTestCase{int8(0), false, true, "which is not a chan int"}, + equalsTestCase{int16(0), false, true, "which is not a chan int"}, + equalsTestCase{int32(0), false, true, "which is not a chan int"}, + equalsTestCase{int64(0), false, true, "which is not a chan int"}, + equalsTestCase{uint(0), false, true, "which is not a chan int"}, + equalsTestCase{uint8(0), false, true, "which is not a chan int"}, + equalsTestCase{uint16(0), false, true, "which is not a chan int"}, + equalsTestCase{uint32(0), false, true, "which is not a chan int"}, + equalsTestCase{uint64(0), false, true, "which is not a chan int"}, + equalsTestCase{true, false, true, "which is not a chan int"}, + equalsTestCase{[...]int{}, false, true, "which is not a chan int"}, + equalsTestCase{func() {}, false, true, "which is not a chan int"}, + equalsTestCase{map[int]int{}, false, true, "which is not a chan int"}, + equalsTestCase{&someInt, false, true, "which is not a chan int"}, + equalsTestCase{[]int{}, false, true, "which is not a chan int"}, + equalsTestCase{"taco", false, true, "which is not a chan int"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a chan int"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NonNilChan() { + var nilChan1 chan int + var nilChan2 chan uint + var nonNilChan1 chan int = make(chan int) + var nonNilChan2 chan int = make(chan int) + var nonNilChan3 chan uint = make(chan uint) + + matcher := Equals(nonNilChan1) + ExpectEq(fmt.Sprintf("%v", nonNilChan1), matcher.Description()) + + cases := []equalsTestCase{ + // int channels + equalsTestCase{nonNilChan1, true, false, ""}, + equalsTestCase{nonNilChan2, false, false, ""}, + equalsTestCase{nilChan1, false, false, ""}, + + // uint channels + equalsTestCase{nilChan2, false, true, "which is not a chan int"}, + equalsTestCase{nonNilChan3, false, true, "which is not a chan int"}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a chan int"}, + equalsTestCase{bool(false), false, true, "which is not a chan int"}, + equalsTestCase{int(0), false, true, "which is not a chan int"}, + equalsTestCase{int8(0), false, true, "which is not a chan int"}, + equalsTestCase{int16(0), false, true, "which is not a chan int"}, + equalsTestCase{int32(0), false, true, "which is not a chan int"}, + equalsTestCase{int64(0), false, true, "which is not a chan int"}, + equalsTestCase{uint(0), false, true, "which is not a chan int"}, + equalsTestCase{uint8(0), false, true, "which is not a chan int"}, + equalsTestCase{uint16(0), false, true, "which is not a chan int"}, + equalsTestCase{uint32(0), false, true, "which is not a chan int"}, + equalsTestCase{uint64(0), false, true, "which is not a chan int"}, + equalsTestCase{true, false, true, "which is not a chan int"}, + equalsTestCase{[...]int{}, false, true, "which is not a chan int"}, + equalsTestCase{func() {}, false, true, "which is not a chan int"}, + equalsTestCase{map[int]int{}, false, true, "which is not a chan int"}, + equalsTestCase{&someInt, false, true, "which is not a chan int"}, + equalsTestCase{[]int{}, false, true, "which is not a chan int"}, + equalsTestCase{"taco", false, true, "which is not a chan int"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a chan int"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) ChanDirection() { + var chan1 chan<- int + var chan2 <-chan int + var chan3 chan int + + matcher := Equals(chan1) + ExpectEq(fmt.Sprintf("%v", chan1), matcher.Description()) + + cases := []equalsTestCase{ + equalsTestCase{chan1, true, false, ""}, + equalsTestCase{chan2, false, true, "which is not a chan<- int"}, + equalsTestCase{chan3, false, true, "which is not a chan<- int"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// func +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) Functions() { + func1 := func() {} + func2 := func() {} + func3 := func(x int) {} + + matcher := Equals(func1) + ExpectEq(fmt.Sprintf("%v", func1), matcher.Description()) + + cases := []equalsTestCase{ + // Functions. + equalsTestCase{func1, true, false, ""}, + equalsTestCase{func2, false, false, ""}, + equalsTestCase{func3, false, false, ""}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a function"}, + equalsTestCase{bool(false), false, true, "which is not a function"}, + equalsTestCase{int(0), false, true, "which is not a function"}, + equalsTestCase{int8(0), false, true, "which is not a function"}, + equalsTestCase{int16(0), false, true, "which is not a function"}, + equalsTestCase{int32(0), false, true, "which is not a function"}, + equalsTestCase{int64(0), false, true, "which is not a function"}, + equalsTestCase{uint(0), false, true, "which is not a function"}, + equalsTestCase{uint8(0), false, true, "which is not a function"}, + equalsTestCase{uint16(0), false, true, "which is not a function"}, + equalsTestCase{uint32(0), false, true, "which is not a function"}, + equalsTestCase{uint64(0), false, true, "which is not a function"}, + equalsTestCase{true, false, true, "which is not a function"}, + equalsTestCase{[...]int{}, false, true, "which is not a function"}, + equalsTestCase{map[int]int{}, false, true, "which is not a function"}, + equalsTestCase{&someInt, false, true, "which is not a function"}, + equalsTestCase{[]int{}, false, true, "which is not a function"}, + equalsTestCase{"taco", false, true, "which is not a function"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a function"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// map +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NilMap() { + var nilMap1 map[int]int + var nilMap2 map[int]int + var nilMap3 map[int]uint + var nonNilMap1 map[int]int = make(map[int]int) + var nonNilMap2 map[int]uint = make(map[int]uint) + + matcher := Equals(nilMap1) + ExpectEq("map[]", matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nilMap1, true, false, ""}, + equalsTestCase{nilMap2, true, false, ""}, + equalsTestCase{nilMap3, true, false, ""}, + equalsTestCase{nonNilMap1, false, false, ""}, + equalsTestCase{nonNilMap2, false, false, ""}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a map"}, + equalsTestCase{bool(false), false, true, "which is not a map"}, + equalsTestCase{int(0), false, true, "which is not a map"}, + equalsTestCase{int8(0), false, true, "which is not a map"}, + equalsTestCase{int16(0), false, true, "which is not a map"}, + equalsTestCase{int32(0), false, true, "which is not a map"}, + equalsTestCase{int64(0), false, true, "which is not a map"}, + equalsTestCase{uint(0), false, true, "which is not a map"}, + equalsTestCase{uint8(0), false, true, "which is not a map"}, + equalsTestCase{uint16(0), false, true, "which is not a map"}, + equalsTestCase{uint32(0), false, true, "which is not a map"}, + equalsTestCase{uint64(0), false, true, "which is not a map"}, + equalsTestCase{true, false, true, "which is not a map"}, + equalsTestCase{[...]int{}, false, true, "which is not a map"}, + equalsTestCase{func() {}, false, true, "which is not a map"}, + equalsTestCase{&someInt, false, true, "which is not a map"}, + equalsTestCase{[]int{}, false, true, "which is not a map"}, + equalsTestCase{"taco", false, true, "which is not a map"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a map"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NonNilMap() { + var nilMap1 map[int]int + var nilMap2 map[int]uint + var nonNilMap1 map[int]int = make(map[int]int) + var nonNilMap2 map[int]int = make(map[int]int) + var nonNilMap3 map[int]uint = make(map[int]uint) + + matcher := Equals(nonNilMap1) + ExpectEq("map[]", matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nonNilMap1, true, false, ""}, + equalsTestCase{nonNilMap2, false, false, ""}, + equalsTestCase{nonNilMap3, false, false, ""}, + equalsTestCase{nilMap1, false, false, ""}, + equalsTestCase{nilMap2, false, false, ""}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a map"}, + equalsTestCase{bool(false), false, true, "which is not a map"}, + equalsTestCase{int(0), false, true, "which is not a map"}, + equalsTestCase{int8(0), false, true, "which is not a map"}, + equalsTestCase{int16(0), false, true, "which is not a map"}, + equalsTestCase{int32(0), false, true, "which is not a map"}, + equalsTestCase{int64(0), false, true, "which is not a map"}, + equalsTestCase{uint(0), false, true, "which is not a map"}, + equalsTestCase{uint8(0), false, true, "which is not a map"}, + equalsTestCase{uint16(0), false, true, "which is not a map"}, + equalsTestCase{uint32(0), false, true, "which is not a map"}, + equalsTestCase{uint64(0), false, true, "which is not a map"}, + equalsTestCase{true, false, true, "which is not a map"}, + equalsTestCase{[...]int{}, false, true, "which is not a map"}, + equalsTestCase{func() {}, false, true, "which is not a map"}, + equalsTestCase{&someInt, false, true, "which is not a map"}, + equalsTestCase{[]int{}, false, true, "which is not a map"}, + equalsTestCase{"taco", false, true, "which is not a map"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a map"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Pointers +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NilPointer() { + var someInt int = 17 + var someUint uint = 17 + + var nilInt1 *int + var nilInt2 *int + var nilUint *uint + var nonNilInt *int = &someInt + var nonNilUint *uint = &someUint + + matcher := Equals(nilInt1) + ExpectEq("", matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nilInt1, true, false, ""}, + equalsTestCase{nilInt2, true, false, ""}, + equalsTestCase{nonNilInt, false, false, ""}, + + // Incorrect type. + equalsTestCase{nilUint, false, true, "which is not a *int"}, + equalsTestCase{nonNilUint, false, true, "which is not a *int"}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a *int"}, + equalsTestCase{bool(false), false, true, "which is not a *int"}, + equalsTestCase{int(0), false, true, "which is not a *int"}, + equalsTestCase{int8(0), false, true, "which is not a *int"}, + equalsTestCase{int16(0), false, true, "which is not a *int"}, + equalsTestCase{int32(0), false, true, "which is not a *int"}, + equalsTestCase{int64(0), false, true, "which is not a *int"}, + equalsTestCase{uint(0), false, true, "which is not a *int"}, + equalsTestCase{uint8(0), false, true, "which is not a *int"}, + equalsTestCase{uint16(0), false, true, "which is not a *int"}, + equalsTestCase{uint32(0), false, true, "which is not a *int"}, + equalsTestCase{uint64(0), false, true, "which is not a *int"}, + equalsTestCase{true, false, true, "which is not a *int"}, + equalsTestCase{[...]int{}, false, true, "which is not a *int"}, + equalsTestCase{func() {}, false, true, "which is not a *int"}, + equalsTestCase{map[int]int{}, false, true, "which is not a *int"}, + equalsTestCase{[]int{}, false, true, "which is not a *int"}, + equalsTestCase{"taco", false, true, "which is not a *int"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a *int"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NonNilPointer() { + var someInt int = 17 + var someOtherInt int = 17 + var someUint uint = 17 + + var nilInt *int + var nilUint *uint + var nonNilInt1 *int = &someInt + var nonNilInt2 *int = &someOtherInt + var nonNilUint *uint = &someUint + + matcher := Equals(nonNilInt1) + ExpectEq(fmt.Sprintf("%v", nonNilInt1), matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nonNilInt1, true, false, ""}, + equalsTestCase{nonNilInt2, false, false, ""}, + equalsTestCase{nilInt, false, false, ""}, + + // Incorrect type. + equalsTestCase{nilUint, false, true, "which is not a *int"}, + equalsTestCase{nonNilUint, false, true, "which is not a *int"}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a *int"}, + equalsTestCase{bool(false), false, true, "which is not a *int"}, + equalsTestCase{int(0), false, true, "which is not a *int"}, + equalsTestCase{int8(0), false, true, "which is not a *int"}, + equalsTestCase{int16(0), false, true, "which is not a *int"}, + equalsTestCase{int32(0), false, true, "which is not a *int"}, + equalsTestCase{int64(0), false, true, "which is not a *int"}, + equalsTestCase{uint(0), false, true, "which is not a *int"}, + equalsTestCase{uint8(0), false, true, "which is not a *int"}, + equalsTestCase{uint16(0), false, true, "which is not a *int"}, + equalsTestCase{uint32(0), false, true, "which is not a *int"}, + equalsTestCase{uint64(0), false, true, "which is not a *int"}, + equalsTestCase{true, false, true, "which is not a *int"}, + equalsTestCase{[...]int{}, false, true, "which is not a *int"}, + equalsTestCase{func() {}, false, true, "which is not a *int"}, + equalsTestCase{map[int]int{}, false, true, "which is not a *int"}, + equalsTestCase{[]int{}, false, true, "which is not a *int"}, + equalsTestCase{"taco", false, true, "which is not a *int"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a *int"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Slices +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NilSlice() { + var nilInt1 []int + var nilInt2 []int + var nilUint []uint + + var nonNilInt []int = make([]int, 0) + var nonNilUint []uint = make([]uint, 0) + + matcher := Equals(nilInt1) + ExpectEq("[]", matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nilInt1, true, false, ""}, + equalsTestCase{nilInt2, true, false, ""}, + equalsTestCase{nonNilInt, false, false, ""}, + + // Incorrect type. + equalsTestCase{nilUint, false, true, "which is not a []int"}, + equalsTestCase{nonNilUint, false, true, "which is not a []int"}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a []int"}, + equalsTestCase{bool(false), false, true, "which is not a []int"}, + equalsTestCase{int(0), false, true, "which is not a []int"}, + equalsTestCase{int8(0), false, true, "which is not a []int"}, + equalsTestCase{int16(0), false, true, "which is not a []int"}, + equalsTestCase{int32(0), false, true, "which is not a []int"}, + equalsTestCase{int64(0), false, true, "which is not a []int"}, + equalsTestCase{uint(0), false, true, "which is not a []int"}, + equalsTestCase{uint8(0), false, true, "which is not a []int"}, + equalsTestCase{uint16(0), false, true, "which is not a []int"}, + equalsTestCase{uint32(0), false, true, "which is not a []int"}, + equalsTestCase{uint64(0), false, true, "which is not a []int"}, + equalsTestCase{true, false, true, "which is not a []int"}, + equalsTestCase{[...]int{}, false, true, "which is not a []int"}, + equalsTestCase{func() {}, false, true, "which is not a []int"}, + equalsTestCase{map[int]int{}, false, true, "which is not a []int"}, + equalsTestCase{"taco", false, true, "which is not a []int"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a []int"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NonNilSlice() { + nonNil := make([]int, 0) + f := func() { Equals(nonNil) } + ExpectThat(f, Panics(HasSubstr("non-nil slice"))) +} + +//////////////////////////////////////////////////////////////////////// +// string +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) String() { + partial := "taco" + expected := fmt.Sprintf("%s%d", partial, 1) + + matcher := Equals(expected) + ExpectEq("taco1", matcher.Description()) + + type stringAlias string + + cases := []equalsTestCase{ + // Correct types. + equalsTestCase{"taco1", true, false, ""}, + equalsTestCase{"taco" + "1", true, false, ""}, + equalsTestCase{expected, true, false, ""}, + equalsTestCase{stringAlias("taco1"), true, false, ""}, + + equalsTestCase{"", false, false, ""}, + equalsTestCase{"taco", false, false, ""}, + equalsTestCase{"taco1\x00", false, false, ""}, + equalsTestCase{"taco2", false, false, ""}, + equalsTestCase{stringAlias("taco2"), false, false, ""}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a string"}, + equalsTestCase{bool(false), false, true, "which is not a string"}, + equalsTestCase{int(0), false, true, "which is not a string"}, + equalsTestCase{int8(0), false, true, "which is not a string"}, + equalsTestCase{int16(0), false, true, "which is not a string"}, + equalsTestCase{int32(0), false, true, "which is not a string"}, + equalsTestCase{int64(0), false, true, "which is not a string"}, + equalsTestCase{uint(0), false, true, "which is not a string"}, + equalsTestCase{uint8(0), false, true, "which is not a string"}, + equalsTestCase{uint16(0), false, true, "which is not a string"}, + equalsTestCase{uint32(0), false, true, "which is not a string"}, + equalsTestCase{uint64(0), false, true, "which is not a string"}, + equalsTestCase{true, false, true, "which is not a string"}, + equalsTestCase{[...]int{}, false, true, "which is not a string"}, + equalsTestCase{func() {}, false, true, "which is not a string"}, + equalsTestCase{map[int]int{}, false, true, "which is not a string"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a string"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) StringAlias() { + type stringAlias string + + matcher := Equals(stringAlias("taco")) + ExpectEq("taco", matcher.Description()) + + cases := []equalsTestCase{ + // Correct types. + equalsTestCase{stringAlias("taco"), true, false, ""}, + equalsTestCase{"taco", true, false, ""}, + + equalsTestCase{"burrito", false, false, ""}, + equalsTestCase{stringAlias("burrito"), false, false, ""}, + + // Other types. + equalsTestCase{0, false, true, "which is not a string"}, + equalsTestCase{bool(false), false, true, "which is not a string"}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// struct +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) Struct() { + type someStruct struct{ foo uint } + f := func() { Equals(someStruct{17}) } + ExpectThat(f, Panics(HasSubstr("unsupported kind struct"))) +} + +//////////////////////////////////////////////////////////////////////// +// unsafe.Pointer +//////////////////////////////////////////////////////////////////////// + +func (t *EqualsTest) NilUnsafePointer() { + someInt := int(17) + + var nilPtr1 unsafe.Pointer + var nilPtr2 unsafe.Pointer + var nonNilPtr unsafe.Pointer = unsafe.Pointer(&someInt) + + matcher := Equals(nilPtr1) + ExpectEq("", matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nilPtr1, true, false, ""}, + equalsTestCase{nilPtr2, true, false, ""}, + equalsTestCase{nonNilPtr, false, false, ""}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{bool(false), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int8(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int16(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int32(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int64(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint8(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint16(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint32(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint64(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{true, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{[...]int{}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{make(chan int), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{func() {}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{map[int]int{}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{&someInt, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{[]int{}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{"taco", false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a unsafe.Pointer"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *EqualsTest) NonNilUnsafePointer() { + someInt := int(17) + someOtherInt := int(17) + + var nilPtr unsafe.Pointer + var nonNilPtr1 unsafe.Pointer = unsafe.Pointer(&someInt) + var nonNilPtr2 unsafe.Pointer = unsafe.Pointer(&someOtherInt) + + matcher := Equals(nonNilPtr1) + ExpectEq(fmt.Sprintf("%v", nonNilPtr1), matcher.Description()) + + cases := []equalsTestCase{ + // Correct type. + equalsTestCase{nonNilPtr1, true, false, ""}, + equalsTestCase{nonNilPtr2, false, false, ""}, + equalsTestCase{nilPtr, false, false, ""}, + + // Other types. 
+ equalsTestCase{0, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{bool(false), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int8(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int16(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int32(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{int64(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint8(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint16(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint32(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{uint64(0), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{true, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{[...]int{}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{make(chan int), false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{func() {}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{map[int]int{}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{&someInt, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{[]int{}, false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{"taco", false, true, "which is not a unsafe.Pointer"}, + equalsTestCase{equalsTestCase{}, false, true, "which is not a unsafe.Pointer"}, + } + + t.checkTestCases(matcher, cases) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/error_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/error_test.go new file mode 100644 index 0000000000..f92167cad1 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/error_test.go @@ -0,0 +1,92 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "errors" + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type ErrorTest struct { + matcherCalled bool + suppliedCandidate interface{} + wrappedError error + + matcher Matcher +} + +func init() { RegisterTestSuite(&ErrorTest{}) } + +func (t *ErrorTest) SetUp(i *TestInfo) { + wrapped := &fakeMatcher{ + func(c interface{}) error { + t.matcherCalled = true + t.suppliedCandidate = c + return t.wrappedError + }, + "is foo", + } + + t.matcher = Error(wrapped) +} + +func isFatal(err error) bool { + _, isFatal := err.(*FatalError) + return isFatal +} + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *ErrorTest) Description() { + ExpectThat(t.matcher.Description(), Equals("error is foo")) +} + +func (t *ErrorTest) CandidateIsNil() { + err := t.matcher.Matches(nil) + + ExpectThat(t.matcherCalled, Equals(false)) + ExpectThat(err.Error(), Equals("which is not an error")) + ExpectTrue(isFatal(err)) +} + +func (t *ErrorTest) CandidateIsString() { + err := t.matcher.Matches("taco") + + ExpectThat(t.matcherCalled, Equals(false)) + ExpectThat(err.Error(), Equals("which is not an error")) + ExpectTrue(isFatal(err)) +} + +func (t *ErrorTest) CallsWrappedMatcher() { + candidate := errors.New("taco") + t.matcher.Matches(candidate) + + ExpectThat(t.matcherCalled, Equals(true)) + ExpectThat(t.suppliedCandidate, Equals("taco")) +} + +func (t *ErrorTest) ReturnsWrappedMatcherResult() { + t.wrappedError = errors.New("burrito") + err := t.matcher.Matches(errors.New("")) + ExpectThat(err, Equals(t.wrappedError)) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go new file mode 100644 index 0000000000..f5e29d1ce5 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_or_equal_test.go @@ -0,0 +1,1101 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "math" + + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type GreaterOrEqualTest struct { +} + +func init() { RegisterTestSuite(&GreaterOrEqualTest{}) } + +type geTestCase struct { + candidate interface{} + expectedResult bool + shouldBeFatal bool + expectedError string +} + +func (t *GreaterOrEqualTest) checkTestCases(matcher Matcher, cases []geTestCase) { + for i, c := range cases { + err := matcher.Matches(c.candidate) + + ExpectThat( + (err == nil), + Equals(c.expectedResult), + "Case %d (candidate %v)", + i, + c.candidate) + + if err == nil { + continue + } + + _, isFatal := err.(*FatalError) + ExpectEq( + c.shouldBeFatal, + isFatal, + "Case %d (candidate %v)", + i, + c.candidate) + + ExpectThat( + err, + Error(Equals(c.expectedError)), + "Case %d (candidate %v)", + i, + c.candidate) + } +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterOrEqualTest) IntegerCandidateBadTypes() { + matcher := GreaterOrEqual(int(-150)) + + cases := []geTestCase{ + geTestCase{true, false, true, "which is not comparable"}, + geTestCase{complex64(-151), false, true, "which is not comparable"}, + geTestCase{complex128(-151), false, true, "which is not comparable"}, + geTestCase{[...]int{-151}, false, true, "which is not comparable"}, + geTestCase{make(chan int), false, true, "which is not comparable"}, + geTestCase{func() {}, false, true, "which is not comparable"}, + geTestCase{map[int]int{}, false, true, "which is not comparable"}, + geTestCase{&geTestCase{}, false, true, "which is not comparable"}, + geTestCase{make([]int, 0), false, true, "which is not comparable"}, + geTestCase{"-151", false, true, "which is not comparable"}, + geTestCase{geTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) FloatCandidateBadTypes() { + matcher := GreaterOrEqual(float32(-150)) + + cases := []geTestCase{ + geTestCase{true, false, true, "which is not comparable"}, + geTestCase{complex64(-151), false, true, "which is not comparable"}, + geTestCase{complex128(-151), false, true, "which is not comparable"}, + geTestCase{[...]int{-151}, false, true, "which is not comparable"}, + geTestCase{make(chan int), false, true, "which is not comparable"}, + geTestCase{func() {}, false, true, "which is not comparable"}, + geTestCase{map[int]int{}, false, true, "which is not comparable"}, + geTestCase{&geTestCase{}, false, true, "which is not comparable"}, + geTestCase{make([]int, 0), false, true, "which is not comparable"}, + geTestCase{"-151", false, true, "which is not comparable"}, + geTestCase{geTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) StringCandidateBadTypes() { + matcher := GreaterOrEqual("17") + + cases := []geTestCase{ + geTestCase{true, false, true, "which is not comparable"}, + geTestCase{int(0), false, true, "which is not comparable"}, + geTestCase{int8(0), false, true, "which is not comparable"}, + geTestCase{int16(0), false, true, "which is not comparable"}, + geTestCase{int32(0), false, true, "which is not comparable"}, + geTestCase{int64(0), false, true, "which is not comparable"}, + geTestCase{uint(0), false, true, "which is not comparable"}, + 
geTestCase{uint8(0), false, true, "which is not comparable"}, + geTestCase{uint16(0), false, true, "which is not comparable"}, + geTestCase{uint32(0), false, true, "which is not comparable"}, + geTestCase{uint64(0), false, true, "which is not comparable"}, + geTestCase{float32(0), false, true, "which is not comparable"}, + geTestCase{float64(0), false, true, "which is not comparable"}, + geTestCase{complex64(-151), false, true, "which is not comparable"}, + geTestCase{complex128(-151), false, true, "which is not comparable"}, + geTestCase{[...]int{-151}, false, true, "which is not comparable"}, + geTestCase{make(chan int), false, true, "which is not comparable"}, + geTestCase{func() {}, false, true, "which is not comparable"}, + geTestCase{map[int]int{}, false, true, "which is not comparable"}, + geTestCase{&geTestCase{}, false, true, "which is not comparable"}, + geTestCase{make([]int, 0), false, true, "which is not comparable"}, + geTestCase{geTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) BadArgument() { + panicked := false + + defer func() { + ExpectThat(panicked, Equals(true)) + }() + + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + GreaterOrEqual(complex128(0)) +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterOrEqualTest) NegativeIntegerLiteral() { + matcher := GreaterOrEqual(-150) + desc := matcher.Description() + expectedDesc := "greater than or equal to -150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-(1 << 30), false, false, ""}, + geTestCase{-151, false, false, ""}, + geTestCase{-150, true, false, ""}, + geTestCase{0, true, false, ""}, + geTestCase{17, true, false, ""}, + + geTestCase{int(-(1 << 30)), false, false, ""}, + geTestCase{int(-151), false, false, ""}, + geTestCase{int(-150), true, false, ""}, + geTestCase{int(0), true, false, ""}, + geTestCase{int(17), true, false, ""}, + + geTestCase{int8(-127), true, false, ""}, + geTestCase{int8(0), true, false, ""}, + geTestCase{int8(17), true, false, ""}, + + geTestCase{int16(-(1 << 14)), false, false, ""}, + geTestCase{int16(-151), false, false, ""}, + geTestCase{int16(-150), true, false, ""}, + geTestCase{int16(0), true, false, ""}, + geTestCase{int16(17), true, false, ""}, + + geTestCase{int32(-(1 << 30)), false, false, ""}, + geTestCase{int32(-151), false, false, ""}, + geTestCase{int32(-150), true, false, ""}, + geTestCase{int32(0), true, false, ""}, + geTestCase{int32(17), true, false, ""}, + + geTestCase{int64(-(1 << 30)), false, false, ""}, + geTestCase{int64(-151), false, false, ""}, + geTestCase{int64(-150), true, false, ""}, + geTestCase{int64(0), true, false, ""}, + geTestCase{int64(17), true, false, ""}, + + // Unsigned integers. 
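+ // No unsigned value can be less than -150, so every case in this group
+ // matches; uint((1 << 32) - 151) is the 32-bit two's-complement pattern
+ // of -151, and even it compares greater, not equal.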
+ geTestCase{uint((1 << 32) - 151), true, false, ""}, + geTestCase{uint(0), true, false, ""}, + geTestCase{uint(17), true, false, ""}, + + geTestCase{uint8(0), true, false, ""}, + geTestCase{uint8(17), true, false, ""}, + geTestCase{uint8(253), true, false, ""}, + + geTestCase{uint16((1 << 16) - 151), true, false, ""}, + geTestCase{uint16(0), true, false, ""}, + geTestCase{uint16(17), true, false, ""}, + + geTestCase{uint32((1 << 32) - 151), true, false, ""}, + geTestCase{uint32(0), true, false, ""}, + geTestCase{uint32(17), true, false, ""}, + + geTestCase{uint64((1 << 64) - 151), true, false, ""}, + geTestCase{uint64(0), true, false, ""}, + geTestCase{uint64(17), true, false, ""}, + + geTestCase{uintptr((1 << 64) - 151), true, false, ""}, + geTestCase{uintptr(0), true, false, ""}, + geTestCase{uintptr(17), true, false, ""}, + + // Floating point. + geTestCase{float32(-(1 << 30)), false, false, ""}, + geTestCase{float32(-151), false, false, ""}, + geTestCase{float32(-150.1), false, false, ""}, + geTestCase{float32(-150), true, false, ""}, + geTestCase{float32(-149.9), true, false, ""}, + geTestCase{float32(0), true, false, ""}, + geTestCase{float32(17), true, false, ""}, + geTestCase{float32(160), true, false, ""}, + + geTestCase{float64(-(1 << 30)), false, false, ""}, + geTestCase{float64(-151), false, false, ""}, + geTestCase{float64(-150.1), false, false, ""}, + geTestCase{float64(-150), true, false, ""}, + geTestCase{float64(-149.9), true, false, ""}, + geTestCase{float64(0), true, false, ""}, + geTestCase{float64(17), true, false, ""}, + geTestCase{float64(160), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) ZeroIntegerLiteral() { + matcher := GreaterOrEqual(0) + desc := matcher.Description() + expectedDesc := "greater than or equal to 0" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-(1 << 30), false, false, ""}, + geTestCase{-1, false, false, ""}, + geTestCase{0, true, false, ""}, + geTestCase{1, true, false, ""}, + geTestCase{17, true, false, ""}, + geTestCase{(1 << 30), true, false, ""}, + + geTestCase{int(-(1 << 30)), false, false, ""}, + geTestCase{int(-1), false, false, ""}, + geTestCase{int(0), true, false, ""}, + geTestCase{int(1), true, false, ""}, + geTestCase{int(17), true, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(0), true, false, ""}, + geTestCase{int8(1), true, false, ""}, + + geTestCase{int16(-(1 << 14)), false, false, ""}, + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(0), true, false, ""}, + geTestCase{int16(1), true, false, ""}, + geTestCase{int16(17), true, false, ""}, + + geTestCase{int32(-(1 << 30)), false, false, ""}, + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(0), true, false, ""}, + geTestCase{int32(1), true, false, ""}, + geTestCase{int32(17), true, false, ""}, + + geTestCase{int64(-(1 << 30)), false, false, ""}, + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(0), true, false, ""}, + geTestCase{int64(1), true, false, ""}, + geTestCase{int64(17), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint((1 << 32) - 1), true, false, ""}, + geTestCase{uint(0), true, false, ""}, + geTestCase{uint(17), true, false, ""}, + + geTestCase{uint8(0), true, false, ""}, + geTestCase{uint8(17), true, false, ""}, + geTestCase{uint8(253), true, false, ""}, + + geTestCase{uint16((1 << 16) - 1), true, false, ""}, + geTestCase{uint16(0), true, false, ""}, + geTestCase{uint16(17), true, false, ""}, + + geTestCase{uint32((1 << 32) - 1), true, false, ""}, + geTestCase{uint32(0), true, false, ""}, + geTestCase{uint32(17), true, false, ""}, + + geTestCase{uint64((1 << 64) - 1), true, false, ""}, + geTestCase{uint64(0), true, false, ""}, + geTestCase{uint64(17), true, false, ""}, + + geTestCase{uintptr((1 << 64) - 1), true, false, ""}, + geTestCase{uintptr(0), true, false, ""}, + geTestCase{uintptr(17), true, false, ""}, + + // Floating point. + geTestCase{float32(-(1 << 30)), false, false, ""}, + geTestCase{float32(-1), false, false, ""}, + geTestCase{float32(-0.1), false, false, ""}, + geTestCase{float32(-0.0), true, false, ""}, + geTestCase{float32(0), true, false, ""}, + geTestCase{float32(0.1), true, false, ""}, + geTestCase{float32(17), true, false, ""}, + geTestCase{float32(160), true, false, ""}, + + geTestCase{float64(-(1 << 30)), false, false, ""}, + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(-0.1), false, false, ""}, + geTestCase{float64(-0), true, false, ""}, + geTestCase{float64(0), true, false, ""}, + geTestCase{float64(17), true, false, ""}, + geTestCase{float64(160), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) PositiveIntegerLiteral() { + matcher := GreaterOrEqual(150) + desc := matcher.Description() + expectedDesc := "greater than or equal to 150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-1, false, false, ""}, + geTestCase{149, false, false, ""}, + geTestCase{150, true, false, ""}, + geTestCase{151, true, false, ""}, + + geTestCase{int(-1), false, false, ""}, + geTestCase{int(149), false, false, ""}, + geTestCase{int(150), true, false, ""}, + geTestCase{int(151), true, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(0), false, false, ""}, + geTestCase{int8(17), false, false, ""}, + geTestCase{int8(127), false, false, ""}, + + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(149), false, false, ""}, + geTestCase{int16(150), true, false, ""}, + geTestCase{int16(151), true, false, ""}, + + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(149), false, false, ""}, + geTestCase{int32(150), true, false, ""}, + geTestCase{int32(151), true, false, ""}, + + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(149), false, false, ""}, + geTestCase{int64(150), true, false, ""}, + geTestCase{int64(151), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint(0), false, false, ""}, + geTestCase{uint(149), false, false, ""}, + geTestCase{uint(150), true, false, ""}, + geTestCase{uint(151), true, false, ""}, + + geTestCase{uint8(0), false, false, ""}, + geTestCase{uint8(127), false, false, ""}, + + geTestCase{uint16(0), false, false, ""}, + geTestCase{uint16(149), false, false, ""}, + geTestCase{uint16(150), true, false, ""}, + geTestCase{uint16(151), true, false, ""}, + + geTestCase{uint32(0), false, false, ""}, + geTestCase{uint32(149), false, false, ""}, + geTestCase{uint32(150), true, false, ""}, + geTestCase{uint32(151), true, false, ""}, + + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(149), false, false, ""}, + geTestCase{uint64(150), true, false, ""}, + geTestCase{uint64(151), true, false, ""}, + + geTestCase{uintptr(0), false, false, ""}, + geTestCase{uintptr(149), false, false, ""}, + geTestCase{uintptr(150), true, false, ""}, + geTestCase{uintptr(151), true, false, ""}, + + // Floating point. + geTestCase{float32(-1), false, false, ""}, + geTestCase{float32(149), false, false, ""}, + geTestCase{float32(149.9), false, false, ""}, + geTestCase{float32(150), true, false, ""}, + geTestCase{float32(150.1), true, false, ""}, + geTestCase{float32(151), true, false, ""}, + + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(149), false, false, ""}, + geTestCase{float64(149.9), false, false, ""}, + geTestCase{float64(150), true, false, ""}, + geTestCase{float64(150.1), true, false, ""}, + geTestCase{float64(151), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Float literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterOrEqualTest) NegativeFloatLiteral() { + matcher := GreaterOrEqual(-150.1) + desc := matcher.Description() + expectedDesc := "greater than or equal to -150.1" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-(1 << 30), false, false, ""}, + geTestCase{-151, false, false, ""}, + geTestCase{-150, true, false, ""}, + geTestCase{0, true, false, ""}, + geTestCase{17, true, false, ""}, + + geTestCase{int(-(1 << 30)), false, false, ""}, + geTestCase{int(-151), false, false, ""}, + geTestCase{int(-150), true, false, ""}, + geTestCase{int(0), true, false, ""}, + geTestCase{int(17), true, false, ""}, + + geTestCase{int8(-127), true, false, ""}, + geTestCase{int8(0), true, false, ""}, + geTestCase{int8(17), true, false, ""}, + + geTestCase{int16(-(1 << 14)), false, false, ""}, + geTestCase{int16(-151), false, false, ""}, + geTestCase{int16(-150), true, false, ""}, + geTestCase{int16(0), true, false, ""}, + geTestCase{int16(17), true, false, ""}, + + geTestCase{int32(-(1 << 30)), false, false, ""}, + geTestCase{int32(-151), false, false, ""}, + geTestCase{int32(-150), true, false, ""}, + geTestCase{int32(0), true, false, ""}, + geTestCase{int32(17), true, false, ""}, + + geTestCase{int64(-(1 << 30)), false, false, ""}, + geTestCase{int64(-151), false, false, ""}, + geTestCase{int64(-150), true, false, ""}, + geTestCase{int64(0), true, false, ""}, + geTestCase{int64(17), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint((1 << 32) - 151), true, false, ""}, + geTestCase{uint(0), true, false, ""}, + geTestCase{uint(17), true, false, ""}, + + geTestCase{uint8(0), true, false, ""}, + geTestCase{uint8(17), true, false, ""}, + geTestCase{uint8(253), true, false, ""}, + + geTestCase{uint16((1 << 16) - 151), true, false, ""}, + geTestCase{uint16(0), true, false, ""}, + geTestCase{uint16(17), true, false, ""}, + + geTestCase{uint32((1 << 32) - 151), true, false, ""}, + geTestCase{uint32(0), true, false, ""}, + geTestCase{uint32(17), true, false, ""}, + + geTestCase{uint64((1 << 64) - 151), true, false, ""}, + geTestCase{uint64(0), true, false, ""}, + geTestCase{uint64(17), true, false, ""}, + + geTestCase{uintptr((1 << 64) - 151), true, false, ""}, + geTestCase{uintptr(0), true, false, ""}, + geTestCase{uintptr(17), true, false, ""}, + + // Floating point. + geTestCase{float32(-(1 << 30)), false, false, ""}, + geTestCase{float32(-151), false, false, ""}, + geTestCase{float32(-150.2), false, false, ""}, + geTestCase{float32(-150.1), true, false, ""}, + geTestCase{float32(-150), true, false, ""}, + geTestCase{float32(0), true, false, ""}, + geTestCase{float32(17), true, false, ""}, + geTestCase{float32(160), true, false, ""}, + + geTestCase{float64(-(1 << 30)), false, false, ""}, + geTestCase{float64(-151), false, false, ""}, + geTestCase{float64(-150.2), false, false, ""}, + geTestCase{float64(-150.1), true, false, ""}, + geTestCase{float64(-150), true, false, ""}, + geTestCase{float64(0), true, false, ""}, + geTestCase{float64(17), true, false, ""}, + geTestCase{float64(160), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) PositiveFloatLiteral() { + matcher := GreaterOrEqual(149.9) + desc := matcher.Description() + expectedDesc := "greater than or equal to 149.9" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-1, false, false, ""}, + geTestCase{149, false, false, ""}, + geTestCase{150, true, false, ""}, + geTestCase{151, true, false, ""}, + + geTestCase{int(-1), false, false, ""}, + geTestCase{int(149), false, false, ""}, + geTestCase{int(150), true, false, ""}, + geTestCase{int(151), true, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(0), false, false, ""}, + geTestCase{int8(17), false, false, ""}, + geTestCase{int8(127), false, false, ""}, + + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(149), false, false, ""}, + geTestCase{int16(150), true, false, ""}, + geTestCase{int16(151), true, false, ""}, + + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(149), false, false, ""}, + geTestCase{int32(150), true, false, ""}, + geTestCase{int32(151), true, false, ""}, + + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(149), false, false, ""}, + geTestCase{int64(150), true, false, ""}, + geTestCase{int64(151), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint(0), false, false, ""}, + geTestCase{uint(149), false, false, ""}, + geTestCase{uint(150), true, false, ""}, + geTestCase{uint(151), true, false, ""}, + + geTestCase{uint8(0), false, false, ""}, + geTestCase{uint8(127), false, false, ""}, + + geTestCase{uint16(0), false, false, ""}, + geTestCase{uint16(149), false, false, ""}, + geTestCase{uint16(150), true, false, ""}, + geTestCase{uint16(151), true, false, ""}, + + geTestCase{uint32(0), false, false, ""}, + geTestCase{uint32(149), false, false, ""}, + geTestCase{uint32(150), true, false, ""}, + geTestCase{uint32(151), true, false, ""}, + + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(149), false, false, ""}, + geTestCase{uint64(150), true, false, ""}, + geTestCase{uint64(151), true, false, ""}, + + geTestCase{uintptr(0), false, false, ""}, + geTestCase{uintptr(149), false, false, ""}, + geTestCase{uintptr(150), true, false, ""}, + geTestCase{uintptr(151), true, false, ""}, + + // Floating point. + geTestCase{float32(-1), false, false, ""}, + geTestCase{float32(149), false, false, ""}, + geTestCase{float32(149.8), false, false, ""}, + geTestCase{float32(149.9), true, false, ""}, + geTestCase{float32(150), true, false, ""}, + geTestCase{float32(151), true, false, ""}, + + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(149), false, false, ""}, + geTestCase{float64(149.8), false, false, ""}, + geTestCase{float64(149.9), true, false, ""}, + geTestCase{float64(150), true, false, ""}, + geTestCase{float64(151), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Subtle cases +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterOrEqualTest) Int64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := GreaterOrEqual(int64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than or equal to 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-1, false, false, ""}, + geTestCase{kTwoTo25 + 0, false, false, ""}, + geTestCase{kTwoTo25 + 1, true, false, ""}, + geTestCase{kTwoTo25 + 2, true, false, ""}, + + geTestCase{int(-1), false, false, ""}, + geTestCase{int(kTwoTo25 + 0), false, false, ""}, + geTestCase{int(kTwoTo25 + 1), true, false, ""}, + geTestCase{int(kTwoTo25 + 2), true, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(127), false, false, ""}, + + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(0), false, false, ""}, + geTestCase{int16(32767), false, false, ""}, + + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(kTwoTo25 + 0), false, false, ""}, + geTestCase{int32(kTwoTo25 + 1), true, false, ""}, + geTestCase{int32(kTwoTo25 + 2), true, false, ""}, + + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(kTwoTo25 + 0), false, false, ""}, + geTestCase{int64(kTwoTo25 + 1), true, false, ""}, + geTestCase{int64(kTwoTo25 + 2), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint(0), false, false, ""}, + geTestCase{uint(kTwoTo25 + 0), false, false, ""}, + geTestCase{uint(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint(kTwoTo25 + 2), true, false, ""}, + + geTestCase{uint8(0), false, false, ""}, + geTestCase{uint8(255), false, false, ""}, + + geTestCase{uint16(0), false, false, ""}, + geTestCase{uint16(65535), false, false, ""}, + + geTestCase{uint32(0), false, false, ""}, + geTestCase{uint32(kTwoTo25 + 0), false, false, ""}, + geTestCase{uint32(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint32(kTwoTo25 + 2), true, false, ""}, + + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + geTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + + geTestCase{uintptr(0), false, false, ""}, + geTestCase{uintptr(kTwoTo25 + 0), false, false, ""}, + geTestCase{uintptr(kTwoTo25 + 1), true, false, ""}, + geTestCase{uintptr(kTwoTo25 + 2), true, false, ""}, + + // Floating point. + geTestCase{float32(-1), false, false, ""}, + geTestCase{float32(kTwoTo25 - 2), false, false, ""}, + geTestCase{float32(kTwoTo25 - 1), true, false, ""}, + geTestCase{float32(kTwoTo25 + 0), true, false, ""}, + geTestCase{float32(kTwoTo25 + 1), true, false, ""}, + geTestCase{float32(kTwoTo25 + 2), true, false, ""}, + geTestCase{float32(kTwoTo25 + 3), true, false, ""}, + + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(kTwoTo25 - 2), false, false, ""}, + geTestCase{float64(kTwoTo25 - 1), false, false, ""}, + geTestCase{float64(kTwoTo25 + 0), false, false, ""}, + geTestCase{float64(kTwoTo25 + 1), true, false, ""}, + geTestCase{float64(kTwoTo25 + 2), true, false, ""}, + geTestCase{float64(kTwoTo25 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) Int64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := GreaterOrEqual(int64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than or equal to 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-1, false, false, ""}, + geTestCase{1 << 30, false, false, ""}, + + geTestCase{int(-1), false, false, ""}, + geTestCase{int(math.MaxInt32), false, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(127), false, false, ""}, + + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(0), false, false, ""}, + geTestCase{int16(32767), false, false, ""}, + + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(math.MaxInt32), false, false, ""}, + + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(kTwoTo54 - 1), false, false, ""}, + geTestCase{int64(kTwoTo54 + 0), false, false, ""}, + geTestCase{int64(kTwoTo54 + 1), true, false, ""}, + geTestCase{int64(kTwoTo54 + 2), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint(0), false, false, ""}, + geTestCase{uint(math.MaxUint32), false, false, ""}, + + geTestCase{uint8(0), false, false, ""}, + geTestCase{uint8(255), false, false, ""}, + + geTestCase{uint16(0), false, false, ""}, + geTestCase{uint16(65535), false, false, ""}, + + geTestCase{uint32(0), false, false, ""}, + geTestCase{uint32(math.MaxUint32), false, false, ""}, + + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(kTwoTo54 - 1), false, false, ""}, + geTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + geTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + geTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + + geTestCase{uintptr(0), false, false, ""}, + geTestCase{uintptr(kTwoTo54 - 1), false, false, ""}, + geTestCase{uintptr(kTwoTo54 + 0), false, false, ""}, + geTestCase{uintptr(kTwoTo54 + 1), true, false, ""}, + geTestCase{uintptr(kTwoTo54 + 2), true, false, ""}, + + // Floating point. + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(kTwoTo54 - 2), false, false, ""}, + geTestCase{float64(kTwoTo54 - 1), true, false, ""}, + geTestCase{float64(kTwoTo54 + 0), true, false, ""}, + geTestCase{float64(kTwoTo54 + 1), true, false, ""}, + geTestCase{float64(kTwoTo54 + 2), true, false, ""}, + geTestCase{float64(kTwoTo54 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) Uint64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := GreaterOrEqual(uint64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than or equal to 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-1, false, false, ""}, + geTestCase{kTwoTo25 + 0, false, false, ""}, + geTestCase{kTwoTo25 + 1, true, false, ""}, + geTestCase{kTwoTo25 + 2, true, false, ""}, + + geTestCase{int(-1), false, false, ""}, + geTestCase{int(kTwoTo25 + 0), false, false, ""}, + geTestCase{int(kTwoTo25 + 1), true, false, ""}, + geTestCase{int(kTwoTo25 + 2), true, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(127), false, false, ""}, + + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(0), false, false, ""}, + geTestCase{int16(32767), false, false, ""}, + + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(kTwoTo25 + 0), false, false, ""}, + geTestCase{int32(kTwoTo25 + 1), true, false, ""}, + geTestCase{int32(kTwoTo25 + 2), true, false, ""}, + + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(kTwoTo25 + 0), false, false, ""}, + geTestCase{int64(kTwoTo25 + 1), true, false, ""}, + geTestCase{int64(kTwoTo25 + 2), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint(0), false, false, ""}, + geTestCase{uint(kTwoTo25 + 0), false, false, ""}, + geTestCase{uint(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint(kTwoTo25 + 2), true, false, ""}, + + geTestCase{uint8(0), false, false, ""}, + geTestCase{uint8(255), false, false, ""}, + + geTestCase{uint16(0), false, false, ""}, + geTestCase{uint16(65535), false, false, ""}, + + geTestCase{uint32(0), false, false, ""}, + geTestCase{uint32(kTwoTo25 + 0), false, false, ""}, + geTestCase{uint32(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint32(kTwoTo25 + 2), true, false, ""}, + + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + geTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + + geTestCase{uintptr(0), false, false, ""}, + geTestCase{uintptr(kTwoTo25 + 0), false, false, ""}, + geTestCase{uintptr(kTwoTo25 + 1), true, false, ""}, + geTestCase{uintptr(kTwoTo25 + 2), true, false, ""}, + + // Floating point. + geTestCase{float32(-1), false, false, ""}, + geTestCase{float32(kTwoTo25 - 2), false, false, ""}, + geTestCase{float32(kTwoTo25 - 1), true, false, ""}, + geTestCase{float32(kTwoTo25 + 0), true, false, ""}, + geTestCase{float32(kTwoTo25 + 1), true, false, ""}, + geTestCase{float32(kTwoTo25 + 2), true, false, ""}, + geTestCase{float32(kTwoTo25 + 3), true, false, ""}, + + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(kTwoTo25 - 2), false, false, ""}, + geTestCase{float64(kTwoTo25 - 1), false, false, ""}, + geTestCase{float64(kTwoTo25 + 0), false, false, ""}, + geTestCase{float64(kTwoTo25 + 1), true, false, ""}, + geTestCase{float64(kTwoTo25 + 2), true, false, ""}, + geTestCase{float64(kTwoTo25 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) Uint64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := GreaterOrEqual(uint64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than or equal to 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{-1, false, false, ""}, + geTestCase{1 << 30, false, false, ""}, + + geTestCase{int(-1), false, false, ""}, + geTestCase{int(math.MaxInt32), false, false, ""}, + + geTestCase{int8(-1), false, false, ""}, + geTestCase{int8(127), false, false, ""}, + + geTestCase{int16(-1), false, false, ""}, + geTestCase{int16(0), false, false, ""}, + geTestCase{int16(32767), false, false, ""}, + + geTestCase{int32(-1), false, false, ""}, + geTestCase{int32(math.MaxInt32), false, false, ""}, + + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(kTwoTo54 - 1), false, false, ""}, + geTestCase{int64(kTwoTo54 + 0), false, false, ""}, + geTestCase{int64(kTwoTo54 + 1), true, false, ""}, + geTestCase{int64(kTwoTo54 + 2), true, false, ""}, + + // Unsigned integers. 
+ geTestCase{uint(0), false, false, ""}, + geTestCase{uint(math.MaxUint32), false, false, ""}, + + geTestCase{uint8(0), false, false, ""}, + geTestCase{uint8(255), false, false, ""}, + + geTestCase{uint16(0), false, false, ""}, + geTestCase{uint16(65535), false, false, ""}, + + geTestCase{uint32(0), false, false, ""}, + geTestCase{uint32(math.MaxUint32), false, false, ""}, + + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(kTwoTo54 - 1), false, false, ""}, + geTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + geTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + geTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + + geTestCase{uintptr(0), false, false, ""}, + geTestCase{uintptr(kTwoTo54 - 1), false, false, ""}, + geTestCase{uintptr(kTwoTo54 + 0), false, false, ""}, + geTestCase{uintptr(kTwoTo54 + 1), true, false, ""}, + geTestCase{uintptr(kTwoTo54 + 2), true, false, ""}, + + // Floating point. + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(kTwoTo54 - 2), false, false, ""}, + geTestCase{float64(kTwoTo54 - 1), true, false, ""}, + geTestCase{float64(kTwoTo54 + 0), true, false, ""}, + geTestCase{float64(kTwoTo54 + 1), true, false, ""}, + geTestCase{float64(kTwoTo54 + 2), true, false, ""}, + geTestCase{float64(kTwoTo54 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) Float32AboveExactIntegerRange() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := GreaterOrEqual(float32(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than or equal to 3.3554432e+07" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(kTwoTo25 - 2), false, false, ""}, + geTestCase{int64(kTwoTo25 - 1), true, false, ""}, + geTestCase{int64(kTwoTo25 + 0), true, false, ""}, + geTestCase{int64(kTwoTo25 + 1), true, false, ""}, + geTestCase{int64(kTwoTo25 + 2), true, false, ""}, + geTestCase{int64(kTwoTo25 + 3), true, false, ""}, + + // Unsigned integers. + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(kTwoTo25 - 2), false, false, ""}, + geTestCase{uint64(kTwoTo25 - 1), true, false, ""}, + geTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + geTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + geTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + geTestCase{uint64(kTwoTo25 + 3), true, false, ""}, + + // Floating point. 
+ geTestCase{float32(-1), false, false, ""}, + geTestCase{float32(kTwoTo25 - 2), false, false, ""}, + geTestCase{float32(kTwoTo25 - 1), true, false, ""}, + geTestCase{float32(kTwoTo25 + 0), true, false, ""}, + geTestCase{float32(kTwoTo25 + 1), true, false, ""}, + geTestCase{float32(kTwoTo25 + 2), true, false, ""}, + geTestCase{float32(kTwoTo25 + 3), true, false, ""}, + + geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(kTwoTo25 - 2), false, false, ""}, + geTestCase{float64(kTwoTo25 - 1), true, false, ""}, + geTestCase{float64(kTwoTo25 + 0), true, false, ""}, + geTestCase{float64(kTwoTo25 + 1), true, false, ""}, + geTestCase{float64(kTwoTo25 + 2), true, false, ""}, + geTestCase{float64(kTwoTo25 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) Float64AboveExactIntegerRange() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := GreaterOrEqual(float64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than or equal to 1.8014398509481984e+16" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + // Signed integers. + geTestCase{int64(-1), false, false, ""}, + geTestCase{int64(kTwoTo54 - 2), false, false, ""}, + geTestCase{int64(kTwoTo54 - 1), true, false, ""}, + geTestCase{int64(kTwoTo54 + 0), true, false, ""}, + geTestCase{int64(kTwoTo54 + 1), true, false, ""}, + geTestCase{int64(kTwoTo54 + 2), true, false, ""}, + geTestCase{int64(kTwoTo54 + 3), true, false, ""}, + + // Unsigned integers. + geTestCase{uint64(0), false, false, ""}, + geTestCase{uint64(kTwoTo54 - 2), false, false, ""}, + geTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + geTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + geTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + geTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + geTestCase{uint64(kTwoTo54 + 3), true, false, ""}, + + // Floating point. 
+ geTestCase{float64(-1), false, false, ""}, + geTestCase{float64(kTwoTo54 - 2), false, false, ""}, + geTestCase{float64(kTwoTo54 - 1), true, false, ""}, + geTestCase{float64(kTwoTo54 + 0), true, false, ""}, + geTestCase{float64(kTwoTo54 + 1), true, false, ""}, + geTestCase{float64(kTwoTo54 + 2), true, false, ""}, + geTestCase{float64(kTwoTo54 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// String literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterOrEqualTest) EmptyString() { + matcher := GreaterOrEqual("") + desc := matcher.Description() + expectedDesc := "greater than or equal to \"\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + geTestCase{"", true, false, ""}, + geTestCase{"\x00", true, false, ""}, + geTestCase{"a", true, false, ""}, + geTestCase{"foo", true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) SingleNullByte() { + matcher := GreaterOrEqual("\x00") + desc := matcher.Description() + expectedDesc := "greater than or equal to \"\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + geTestCase{"", false, false, ""}, + geTestCase{"\x00", true, false, ""}, + geTestCase{"a", true, false, ""}, + geTestCase{"foo", true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterOrEqualTest) LongerString() { + matcher := GreaterOrEqual("foo\x00") + desc := matcher.Description() + expectedDesc := "greater than or equal to \"foo\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []geTestCase{ + geTestCase{"", false, false, ""}, + geTestCase{"\x00", false, false, ""}, + geTestCase{"bar", false, false, ""}, + geTestCase{"foo", false, false, ""}, + geTestCase{"foo\x00", true, false, ""}, + geTestCase{"fooa", true, false, ""}, + geTestCase{"qux", true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go new file mode 100644 index 0000000000..bf70fe5663 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/greater_than_test.go @@ -0,0 +1,1077 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "math" + + . "github.com/smartystreets/assertions/internal/oglematchers" + .
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type GreaterThanTest struct { +} + +func init() { RegisterTestSuite(&GreaterThanTest{}) } + +type gtTestCase struct { + candidate interface{} + expectedResult bool + shouldBeFatal bool + expectedError string +} + +func (t *GreaterThanTest) checkTestCases(matcher Matcher, cases []gtTestCase) { + for i, c := range cases { + err := matcher.Matches(c.candidate) + + ExpectThat( + (err == nil), + Equals(c.expectedResult), + "Case %d (candidate %v)", + i, + c.candidate) + + if err == nil { + continue + } + + _, isFatal := err.(*FatalError) + ExpectEq( + c.shouldBeFatal, + isFatal, + "Case %d (candidate %v)", + i, + c.candidate) + + ExpectThat( + err, + Error(Equals(c.expectedError)), + "Case %d (candidate %v)", + i, + c.candidate) + } +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterThanTest) IntegerCandidateBadTypes() { + matcher := GreaterThan(int(-150)) + + cases := []gtTestCase{ + gtTestCase{true, false, true, "which is not comparable"}, + gtTestCase{complex64(-151), false, true, "which is not comparable"}, + gtTestCase{complex128(-151), false, true, "which is not comparable"}, + gtTestCase{[...]int{-151}, false, true, "which is not comparable"}, + gtTestCase{make(chan int), false, true, "which is not comparable"}, + gtTestCase{func() {}, false, true, "which is not comparable"}, + gtTestCase{map[int]int{}, false, true, "which is not comparable"}, + gtTestCase{&gtTestCase{}, false, true, "which is not comparable"}, + gtTestCase{make([]int, 0), false, true, "which is not comparable"}, + gtTestCase{"-151", false, true, "which is not comparable"}, + gtTestCase{gtTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) FloatCandidateBadTypes() { + matcher := GreaterThan(float32(-150)) + + cases := []gtTestCase{ + gtTestCase{true, false, true, "which is not comparable"}, + gtTestCase{complex64(-151), false, true, "which is not comparable"}, + gtTestCase{complex128(-151), false, true, "which is not comparable"}, + gtTestCase{[...]int{-151}, false, true, "which is not comparable"}, + gtTestCase{make(chan int), false, true, "which is not comparable"}, + gtTestCase{func() {}, false, true, "which is not comparable"}, + gtTestCase{map[int]int{}, false, true, "which is not comparable"}, + gtTestCase{&gtTestCase{}, false, true, "which is not comparable"}, + gtTestCase{make([]int, 0), false, true, "which is not comparable"}, + gtTestCase{"-151", false, true, "which is not comparable"}, + gtTestCase{gtTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) StringCandidateBadTypes() { + matcher := GreaterThan("17") + + cases := []gtTestCase{ + gtTestCase{true, false, true, "which is not comparable"}, + gtTestCase{int(0), false, true, "which is not comparable"}, + gtTestCase{int8(0), false, true, "which is not comparable"}, + gtTestCase{int16(0), false, true, "which is not comparable"}, + gtTestCase{int32(0), false, true, "which is not comparable"}, + gtTestCase{int64(0), false, true, "which is not comparable"}, + gtTestCase{uint(0), false, true, "which is not comparable"}, + gtTestCase{uint8(0), false, true, "which
is not comparable"}, + gtTestCase{uint16(0), false, true, "which is not comparable"}, + gtTestCase{uint32(0), false, true, "which is not comparable"}, + gtTestCase{uint64(0), false, true, "which is not comparable"}, + gtTestCase{float32(0), false, true, "which is not comparable"}, + gtTestCase{float64(0), false, true, "which is not comparable"}, + gtTestCase{complex64(-151), false, true, "which is not comparable"}, + gtTestCase{complex128(-151), false, true, "which is not comparable"}, + gtTestCase{[...]int{-151}, false, true, "which is not comparable"}, + gtTestCase{make(chan int), false, true, "which is not comparable"}, + gtTestCase{func() {}, false, true, "which is not comparable"}, + gtTestCase{map[int]int{}, false, true, "which is not comparable"}, + gtTestCase{&gtTestCase{}, false, true, "which is not comparable"}, + gtTestCase{make([]int, 0), false, true, "which is not comparable"}, + gtTestCase{gtTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) BadArgument() { + panicked := false + + defer func() { + ExpectThat(panicked, Equals(true)) + }() + + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + GreaterThan(complex128(0)) +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterThanTest) NegativeIntegerLiteral() { + matcher := GreaterThan(-150) + desc := matcher.Description() + expectedDesc := "greater than -150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-(1 << 30), false, false, ""}, + gtTestCase{-151, false, false, ""}, + gtTestCase{-150, false, false, ""}, + gtTestCase{-149, true, false, ""}, + gtTestCase{0, true, false, ""}, + gtTestCase{17, true, false, ""}, + + gtTestCase{int(-(1 << 30)), false, false, ""}, + gtTestCase{int(-151), false, false, ""}, + gtTestCase{int(-150), false, false, ""}, + gtTestCase{int(-149), true, false, ""}, + gtTestCase{int(0), true, false, ""}, + gtTestCase{int(17), true, false, ""}, + + gtTestCase{int8(-127), true, false, ""}, + gtTestCase{int8(0), true, false, ""}, + gtTestCase{int8(17), true, false, ""}, + + gtTestCase{int16(-(1 << 14)), false, false, ""}, + gtTestCase{int16(-151), false, false, ""}, + gtTestCase{int16(-150), false, false, ""}, + gtTestCase{int16(-149), true, false, ""}, + gtTestCase{int16(0), true, false, ""}, + gtTestCase{int16(17), true, false, ""}, + + gtTestCase{int32(-(1 << 30)), false, false, ""}, + gtTestCase{int32(-151), false, false, ""}, + gtTestCase{int32(-150), false, false, ""}, + gtTestCase{int32(-149), true, false, ""}, + gtTestCase{int32(0), true, false, ""}, + gtTestCase{int32(17), true, false, ""}, + + gtTestCase{int64(-(1 << 30)), false, false, ""}, + gtTestCase{int64(-151), false, false, ""}, + gtTestCase{int64(-150), false, false, ""}, + gtTestCase{int64(-149), true, false, ""}, + gtTestCase{int64(0), true, false, ""}, + gtTestCase{int64(17), true, false, ""}, + + // Unsigned integers.
+ gtTestCase{uint((1 << 32) - 151), true, false, ""}, + gtTestCase{uint(0), true, false, ""}, + gtTestCase{uint(17), true, false, ""}, + + gtTestCase{uint8(0), true, false, ""}, + gtTestCase{uint8(17), true, false, ""}, + gtTestCase{uint8(253), true, false, ""}, + + gtTestCase{uint16((1 << 16) - 151), true, false, ""}, + gtTestCase{uint16(0), true, false, ""}, + gtTestCase{uint16(17), true, false, ""}, + + gtTestCase{uint32((1 << 32) - 151), true, false, ""}, + gtTestCase{uint32(0), true, false, ""}, + gtTestCase{uint32(17), true, false, ""}, + + gtTestCase{uint64((1 << 64) - 151), true, false, ""}, + gtTestCase{uint64(0), true, false, ""}, + gtTestCase{uint64(17), true, false, ""}, + + // Floating point. + gtTestCase{float32(-(1 << 30)), false, false, ""}, + gtTestCase{float32(-151), false, false, ""}, + gtTestCase{float32(-150.1), false, false, ""}, + gtTestCase{float32(-150), false, false, ""}, + gtTestCase{float32(-149.9), true, false, ""}, + gtTestCase{float32(0), true, false, ""}, + gtTestCase{float32(17), true, false, ""}, + gtTestCase{float32(160), true, false, ""}, + + gtTestCase{float64(-(1 << 30)), false, false, ""}, + gtTestCase{float64(-151), false, false, ""}, + gtTestCase{float64(-150.1), false, false, ""}, + gtTestCase{float64(-150), false, false, ""}, + gtTestCase{float64(-149.9), true, false, ""}, + gtTestCase{float64(0), true, false, ""}, + gtTestCase{float64(17), true, false, ""}, + gtTestCase{float64(160), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) ZeroIntegerLiteral() { + matcher := GreaterThan(0) + desc := matcher.Description() + expectedDesc := "greater than 0" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-(1 << 30), false, false, ""}, + gtTestCase{-1, false, false, ""}, + gtTestCase{0, false, false, ""}, + gtTestCase{1, true, false, ""}, + gtTestCase{17, true, false, ""}, + gtTestCase{(1 << 30), true, false, ""}, + + gtTestCase{int(-(1 << 30)), false, false, ""}, + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(0), false, false, ""}, + gtTestCase{int(1), true, false, ""}, + gtTestCase{int(17), true, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(0), false, false, ""}, + gtTestCase{int8(1), true, false, ""}, + + gtTestCase{int16(-(1 << 14)), false, false, ""}, + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(0), false, false, ""}, + gtTestCase{int16(1), true, false, ""}, + gtTestCase{int16(17), true, false, ""}, + + gtTestCase{int32(-(1 << 30)), false, false, ""}, + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(0), false, false, ""}, + gtTestCase{int32(1), true, false, ""}, + gtTestCase{int32(17), true, false, ""}, + + gtTestCase{int64(-(1 << 30)), false, false, ""}, + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(0), false, false, ""}, + gtTestCase{int64(1), true, false, ""}, + gtTestCase{int64(17), true, false, ""}, + + // Unsigned integers. 
+ gtTestCase{uint((1 << 32) - 1), true, false, ""}, + gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(1), true, false, ""}, + gtTestCase{uint(17), true, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(1), true, false, ""}, + gtTestCase{uint8(17), true, false, ""}, + gtTestCase{uint8(253), true, false, ""}, + + gtTestCase{uint16((1 << 16) - 1), true, false, ""}, + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(1), true, false, ""}, + gtTestCase{uint16(17), true, false, ""}, + + gtTestCase{uint32((1 << 32) - 1), true, false, ""}, + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(1), true, false, ""}, + gtTestCase{uint32(17), true, false, ""}, + + gtTestCase{uint64((1 << 64) - 1), true, false, ""}, + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(1), true, false, ""}, + gtTestCase{uint64(17), true, false, ""}, + + // Floating point. + gtTestCase{float32(-(1 << 30)), false, false, ""}, + gtTestCase{float32(-1), false, false, ""}, + gtTestCase{float32(-0.1), false, false, ""}, + gtTestCase{float32(-0.0), false, false, ""}, + gtTestCase{float32(0), false, false, ""}, + gtTestCase{float32(0.1), true, false, ""}, + gtTestCase{float32(17), true, false, ""}, + gtTestCase{float32(160), true, false, ""}, + + gtTestCase{float64(-(1 << 30)), false, false, ""}, + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(-0.1), false, false, ""}, + gtTestCase{float64(-0), false, false, ""}, + gtTestCase{float64(0), false, false, ""}, + gtTestCase{float64(0.1), true, false, ""}, + gtTestCase{float64(17), true, false, ""}, + gtTestCase{float64(160), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) PositiveIntegerLiteral() { + matcher := GreaterThan(150) + desc := matcher.Description() + expectedDesc := "greater than 150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-1, false, false, ""}, + gtTestCase{149, false, false, ""}, + gtTestCase{150, false, false, ""}, + gtTestCase{151, true, false, ""}, + + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(149), false, false, ""}, + gtTestCase{int(150), false, false, ""}, + gtTestCase{int(151), true, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(0), false, false, ""}, + gtTestCase{int8(17), false, false, ""}, + gtTestCase{int8(127), false, false, ""}, + + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(149), false, false, ""}, + gtTestCase{int16(150), false, false, ""}, + gtTestCase{int16(151), true, false, ""}, + + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(149), false, false, ""}, + gtTestCase{int32(150), false, false, ""}, + gtTestCase{int32(151), true, false, ""}, + + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(149), false, false, ""}, + gtTestCase{int64(150), false, false, ""}, + gtTestCase{int64(151), true, false, ""}, + + // Unsigned integers. 
+ gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(149), false, false, ""}, + gtTestCase{uint(150), false, false, ""}, + gtTestCase{uint(151), true, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(127), false, false, ""}, + + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(149), false, false, ""}, + gtTestCase{uint16(150), false, false, ""}, + gtTestCase{uint16(151), true, false, ""}, + + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(149), false, false, ""}, + gtTestCase{uint32(150), false, false, ""}, + gtTestCase{uint32(151), true, false, ""}, + + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(149), false, false, ""}, + gtTestCase{uint64(150), false, false, ""}, + gtTestCase{uint64(151), true, false, ""}, + + // Floating point. + gtTestCase{float32(-1), false, false, ""}, + gtTestCase{float32(149), false, false, ""}, + gtTestCase{float32(149.9), false, false, ""}, + gtTestCase{float32(150), false, false, ""}, + gtTestCase{float32(150.1), true, false, ""}, + gtTestCase{float32(151), true, false, ""}, + + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(149), false, false, ""}, + gtTestCase{float64(149.9), false, false, ""}, + gtTestCase{float64(150), false, false, ""}, + gtTestCase{float64(150.1), true, false, ""}, + gtTestCase{float64(151), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Float literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterThanTest) NegativeFloatLiteral() { + matcher := GreaterThan(-150.1) + desc := matcher.Description() + expectedDesc := "greater than -150.1" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-(1 << 30), false, false, ""}, + gtTestCase{-151, false, false, ""}, + gtTestCase{-150.1, false, false, ""}, + gtTestCase{-150, true, false, ""}, + gtTestCase{-149, true, false, ""}, + gtTestCase{0, true, false, ""}, + gtTestCase{17, true, false, ""}, + + gtTestCase{int(-(1 << 30)), false, false, ""}, + gtTestCase{int(-151), false, false, ""}, + gtTestCase{int(-150), true, false, ""}, + gtTestCase{int(-149), true, false, ""}, + gtTestCase{int(0), true, false, ""}, + gtTestCase{int(17), true, false, ""}, + + gtTestCase{int8(-127), true, false, ""}, + gtTestCase{int8(0), true, false, ""}, + gtTestCase{int8(17), true, false, ""}, + + gtTestCase{int16(-(1 << 14)), false, false, ""}, + gtTestCase{int16(-151), false, false, ""}, + gtTestCase{int16(-150), true, false, ""}, + gtTestCase{int16(-149), true, false, ""}, + gtTestCase{int16(0), true, false, ""}, + gtTestCase{int16(17), true, false, ""}, + + gtTestCase{int32(-(1 << 30)), false, false, ""}, + gtTestCase{int32(-151), false, false, ""}, + gtTestCase{int32(-150), true, false, ""}, + gtTestCase{int32(-149), true, false, ""}, + gtTestCase{int32(0), true, false, ""}, + gtTestCase{int32(17), true, false, ""}, + + gtTestCase{int64(-(1 << 30)), false, false, ""}, + gtTestCase{int64(-151), false, false, ""}, + gtTestCase{int64(-150), true, false, ""}, + gtTestCase{int64(-149), true, false, ""}, + gtTestCase{int64(0), true, false, ""}, + gtTestCase{int64(17), true, false, ""}, + + // Unsigned integers. 
+ gtTestCase{uint((1 << 32) - 151), true, false, ""}, + gtTestCase{uint(0), true, false, ""}, + gtTestCase{uint(17), true, false, ""}, + + gtTestCase{uint8(0), true, false, ""}, + gtTestCase{uint8(17), true, false, ""}, + gtTestCase{uint8(253), true, false, ""}, + + gtTestCase{uint16((1 << 16) - 151), true, false, ""}, + gtTestCase{uint16(0), true, false, ""}, + gtTestCase{uint16(17), true, false, ""}, + + gtTestCase{uint32((1 << 32) - 151), true, false, ""}, + gtTestCase{uint32(0), true, false, ""}, + gtTestCase{uint32(17), true, false, ""}, + + gtTestCase{uint64((1 << 64) - 151), true, false, ""}, + gtTestCase{uint64(0), true, false, ""}, + gtTestCase{uint64(17), true, false, ""}, + + // Floating point. + gtTestCase{float32(-(1 << 30)), false, false, ""}, + gtTestCase{float32(-151), false, false, ""}, + gtTestCase{float32(-150.2), false, false, ""}, + gtTestCase{float32(-150.1), false, false, ""}, + gtTestCase{float32(-150), true, false, ""}, + gtTestCase{float32(0), true, false, ""}, + gtTestCase{float32(17), true, false, ""}, + gtTestCase{float32(160), true, false, ""}, + + gtTestCase{float64(-(1 << 30)), false, false, ""}, + gtTestCase{float64(-151), false, false, ""}, + gtTestCase{float64(-150.2), false, false, ""}, + gtTestCase{float64(-150.1), false, false, ""}, + gtTestCase{float64(-150), true, false, ""}, + gtTestCase{float64(0), true, false, ""}, + gtTestCase{float64(17), true, false, ""}, + gtTestCase{float64(160), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) PositiveFloatLiteral() { + matcher := GreaterThan(149.9) + desc := matcher.Description() + expectedDesc := "greater than 149.9" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-1, false, false, ""}, + gtTestCase{149, false, false, ""}, + gtTestCase{149.9, false, false, ""}, + gtTestCase{150, true, false, ""}, + gtTestCase{151, true, false, ""}, + + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(149), false, false, ""}, + gtTestCase{int(150), true, false, ""}, + gtTestCase{int(151), true, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(0), false, false, ""}, + gtTestCase{int8(17), false, false, ""}, + gtTestCase{int8(127), false, false, ""}, + + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(149), false, false, ""}, + gtTestCase{int16(150), true, false, ""}, + gtTestCase{int16(151), true, false, ""}, + + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(149), false, false, ""}, + gtTestCase{int32(150), true, false, ""}, + gtTestCase{int32(151), true, false, ""}, + + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(149), false, false, ""}, + gtTestCase{int64(150), true, false, ""}, + gtTestCase{int64(151), true, false, ""}, + + // Unsigned integers. 
+ gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(149), false, false, ""}, + gtTestCase{uint(150), true, false, ""}, + gtTestCase{uint(151), true, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(127), false, false, ""}, + + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(149), false, false, ""}, + gtTestCase{uint16(150), true, false, ""}, + gtTestCase{uint16(151), true, false, ""}, + + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(149), false, false, ""}, + gtTestCase{uint32(150), true, false, ""}, + gtTestCase{uint32(151), true, false, ""}, + + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(149), false, false, ""}, + gtTestCase{uint64(150), true, false, ""}, + gtTestCase{uint64(151), true, false, ""}, + + // Floating point. + gtTestCase{float32(-1), false, false, ""}, + gtTestCase{float32(149), false, false, ""}, + gtTestCase{float32(149.8), false, false, ""}, + gtTestCase{float32(149.9), false, false, ""}, + gtTestCase{float32(150), true, false, ""}, + gtTestCase{float32(151), true, false, ""}, + + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(149), false, false, ""}, + gtTestCase{float64(149.8), false, false, ""}, + gtTestCase{float64(149.9), false, false, ""}, + gtTestCase{float64(150), true, false, ""}, + gtTestCase{float64(151), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Subtle cases +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterThanTest) Int64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := GreaterThan(int64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-1, false, false, ""}, + gtTestCase{kTwoTo25 + 0, false, false, ""}, + gtTestCase{kTwoTo25 + 1, false, false, ""}, + gtTestCase{kTwoTo25 + 2, true, false, ""}, + + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(127), false, false, ""}, + + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(0), false, false, ""}, + gtTestCase{int16(32767), false, false, ""}, + + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int32(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 2), true, false, ""}, + + // Unsigned integers. 
+ gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(255), false, false, ""}, + + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(65535), false, false, ""}, + + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint32(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + + // Floating point. + gtTestCase{float32(-1), false, false, ""}, + gtTestCase{float32(kTwoTo25 - 2), false, false, ""}, + gtTestCase{float32(kTwoTo25 - 1), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 2), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 3), true, false, ""}, + + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(kTwoTo25 - 2), false, false, ""}, + gtTestCase{float64(kTwoTo25 - 1), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 2), true, false, ""}, + gtTestCase{float64(kTwoTo25 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) Int64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := GreaterThan(int64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-1, false, false, ""}, + gtTestCase{1 << 30, false, false, ""}, + + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(math.MaxInt32), false, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(127), false, false, ""}, + + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(0), false, false, ""}, + gtTestCase{int16(32767), false, false, ""}, + + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(math.MaxInt32), false, false, ""}, + + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 2), true, false, ""}, + + // Unsigned integers. 
+ gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(math.MaxUint32), false, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(255), false, false, ""}, + + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(65535), false, false, ""}, + + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(math.MaxUint32), false, false, ""}, + + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + + // Floating point. + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(kTwoTo54 - 2), false, false, ""}, + gtTestCase{float64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 2), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) Uint64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := GreaterThan(uint64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-1, false, false, ""}, + gtTestCase{kTwoTo25 + 0, false, false, ""}, + gtTestCase{kTwoTo25 + 1, false, false, ""}, + gtTestCase{kTwoTo25 + 2, true, false, ""}, + + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(127), false, false, ""}, + + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(0), false, false, ""}, + gtTestCase{int16(32767), false, false, ""}, + + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int32(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 2), true, false, ""}, + + // Unsigned integers. + gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(255), false, false, ""}, + + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(65535), false, false, ""}, + + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint32(kTwoTo25 + 2), true, false, ""}, + + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + + // Floating point. 
+ gtTestCase{float32(-1), false, false, ""}, + gtTestCase{float32(kTwoTo25 - 2), false, false, ""}, + gtTestCase{float32(kTwoTo25 - 1), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 2), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 3), true, false, ""}, + + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(kTwoTo25 - 2), false, false, ""}, + gtTestCase{float64(kTwoTo25 - 1), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 2), true, false, ""}, + gtTestCase{float64(kTwoTo25 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) Uint64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := GreaterThan(uint64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{-1, false, false, ""}, + gtTestCase{1 << 30, false, false, ""}, + + gtTestCase{int(-1), false, false, ""}, + gtTestCase{int(math.MaxInt32), false, false, ""}, + + gtTestCase{int8(-1), false, false, ""}, + gtTestCase{int8(127), false, false, ""}, + + gtTestCase{int16(-1), false, false, ""}, + gtTestCase{int16(0), false, false, ""}, + gtTestCase{int16(32767), false, false, ""}, + + gtTestCase{int32(-1), false, false, ""}, + gtTestCase{int32(math.MaxInt32), false, false, ""}, + + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 2), true, false, ""}, + + // Unsigned integers. + gtTestCase{uint(0), false, false, ""}, + gtTestCase{uint(math.MaxUint32), false, false, ""}, + + gtTestCase{uint8(0), false, false, ""}, + gtTestCase{uint8(255), false, false, ""}, + + gtTestCase{uint16(0), false, false, ""}, + gtTestCase{uint16(65535), false, false, ""}, + + gtTestCase{uint32(0), false, false, ""}, + gtTestCase{uint32(math.MaxUint32), false, false, ""}, + + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + + // Floating point. + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(kTwoTo54 - 2), false, false, ""}, + gtTestCase{float64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 2), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) Float32AboveExactIntegerRange() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. 
+ const kTwoTo25 = 1 << 25 + matcher := GreaterThan(float32(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than 3.3554432e+07" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(kTwoTo25 - 2), false, false, ""}, + gtTestCase{int64(kTwoTo25 - 1), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 2), false, false, ""}, + gtTestCase{int64(kTwoTo25 + 3), true, false, ""}, + + // Unsigned integers. + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(kTwoTo25 - 2), false, false, ""}, + gtTestCase{uint64(kTwoTo25 - 1), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + gtTestCase{uint64(kTwoTo25 + 3), true, false, ""}, + + // Floating point. + gtTestCase{float32(-1), false, false, ""}, + gtTestCase{float32(kTwoTo25 - 2), false, false, ""}, + gtTestCase{float32(kTwoTo25 - 1), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 0), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 1), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 2), false, false, ""}, + gtTestCase{float32(kTwoTo25 + 3), true, false, ""}, + + gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(kTwoTo25 - 2), false, false, ""}, + gtTestCase{float64(kTwoTo25 - 1), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 0), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 1), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 2), false, false, ""}, + gtTestCase{float64(kTwoTo25 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) Float64AboveExactIntegerRange() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := GreaterThan(float64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "greater than 1.8014398509481984e+16" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + // Signed integers. + gtTestCase{int64(-1), false, false, ""}, + gtTestCase{int64(kTwoTo54 - 2), false, false, ""}, + gtTestCase{int64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 2), false, false, ""}, + gtTestCase{int64(kTwoTo54 + 3), true, false, ""}, + + // Unsigned integers. + gtTestCase{uint64(0), false, false, ""}, + gtTestCase{uint64(kTwoTo54 - 2), false, false, ""}, + gtTestCase{uint64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + gtTestCase{uint64(kTwoTo54 + 3), true, false, ""}, + + // Floating point. 
+ gtTestCase{float64(-1), false, false, ""}, + gtTestCase{float64(kTwoTo54 - 2), false, false, ""}, + gtTestCase{float64(kTwoTo54 - 1), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 0), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 1), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 2), false, false, ""}, + gtTestCase{float64(kTwoTo54 + 3), true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// String literals +//////////////////////////////////////////////////////////////////////// + +func (t *GreaterThanTest) EmptyString() { + matcher := GreaterThan("") + desc := matcher.Description() + expectedDesc := "greater than \"\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + gtTestCase{"", false, false, ""}, + gtTestCase{"\x00", true, false, ""}, + gtTestCase{"a", true, false, ""}, + gtTestCase{"foo", true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) SingleNullByte() { + matcher := GreaterThan("\x00") + desc := matcher.Description() + expectedDesc := "greater than \"\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + gtTestCase{"", false, false, ""}, + gtTestCase{"\x00", false, false, ""}, + gtTestCase{"\x00\x00", true, false, ""}, + gtTestCase{"a", true, false, ""}, + gtTestCase{"foo", true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *GreaterThanTest) LongerString() { + matcher := GreaterThan("foo\x00") + desc := matcher.Description() + expectedDesc := "greater than \"foo\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []gtTestCase{ + gtTestCase{"", false, false, ""}, + gtTestCase{"\x00", false, false, ""}, + gtTestCase{"bar", false, false, ""}, + gtTestCase{"foo", false, false, ""}, + gtTestCase{"foo\x00", false, false, ""}, + gtTestCase{"foo\x00\x00", true, false, ""}, + gtTestCase{"fooa", true, false, ""}, + gtTestCase{"qux", true, false, ""}, + } + + t.checkTestCases(matcher, cases) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go new file mode 100644 index 0000000000..a4a3e308aa --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_same_type_as_test.go @@ -0,0 +1,181 @@ +// Copyright 2015 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "io" + "testing" + + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +func TestHasSameTypeAs(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Boilerplate +//////////////////////////////////////////////////////////////////////// + +type HasSameTypeAsTest struct { +} + +func init() { RegisterTestSuite(&HasSameTypeAsTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *HasSameTypeAsTest) CandidateIsLiteralNil() { + matcher := HasSameTypeAs(nil) + var err error + + // Description + ExpectEq("has type ", matcher.Description()) + + // Literal nil + err = matcher.Matches(nil) + ExpectEq(nil, err) + + // nil in interface variable + var r io.Reader + err = matcher.Matches(r) + ExpectEq(nil, err) + + // int + err = matcher.Matches(17) + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type int"))) + + // string + err = matcher.Matches("") + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type string"))) + + // nil map + var m map[string]string + err = matcher.Matches(m) + + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type map[string]string"))) + + // Non-nil map + m = make(map[string]string) + err = matcher.Matches(m) + + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type map[string]string"))) +} + +func (t *HasSameTypeAsTest) CandidateIsNilMap() { + var m map[string]string + matcher := HasSameTypeAs(m) + var err error + + // Description + ExpectEq("has type map[string]string", matcher.Description()) + + // nil map + m = nil + err = matcher.Matches(m) + ExpectEq(nil, err) + + // Non-nil map + m = make(map[string]string) + err = matcher.Matches(m) + ExpectEq(nil, err) + + // Literal nil + err = matcher.Matches(nil) + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type "))) + + // int + err = matcher.Matches(17) + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type int"))) + + // string + err = matcher.Matches("") + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type string"))) +} + +func (t *HasSameTypeAsTest) CandidateIsNilInInterfaceVariable() { + var r io.Reader + matcher := HasSameTypeAs(r) + var err error + + // Description + ExpectEq("has type ", matcher.Description()) + + // nil in interface variable + r = nil + err = matcher.Matches(r) + ExpectEq(nil, err) + + // Literal nil + err = matcher.Matches(nil) + ExpectEq(nil, err) + + // int + err = matcher.Matches(17) + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type int"))) +} + +func (t *HasSameTypeAsTest) CandidateIsString() { + matcher := HasSameTypeAs("") + var err error + + // Description + ExpectEq("has type string", matcher.Description()) + + // string + err = matcher.Matches("taco") + ExpectEq(nil, err) + + // string alias + type Foo string + err = matcher.Matches(Foo("taco")) + ExpectThat(err, Error(MatchesRegexp("which has type .*Foo"))) + + // Literal nil + err = matcher.Matches(nil) + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type "))) + + // int + err = matcher.Matches(17) + AssertNe(nil, err) + ExpectThat(err, Error(Equals("which has type int"))) +} + +func (t *HasSameTypeAsTest) CandidateIsStringAlias() { + type Foo string + matcher := HasSameTypeAs(Foo("")) + var err error + + // Description + ExpectThat(matcher.Description(), MatchesRegexp("has type .*Foo")) + + // string alias + err = matcher.Matches(Foo("taco")) + 
ExpectEq(nil, err) + + // string + err = matcher.Matches("taco") + ExpectThat(err, Error(Equals("which has type string"))) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go new file mode 100644 index 0000000000..6fc913a249 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/has_substr_test.go @@ -0,0 +1,93 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . "github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type HasSubstrTest struct { + +} + +func init() { RegisterTestSuite(&HasSubstrTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *HasSubstrTest) Description() { + matcher := HasSubstr("taco") + ExpectThat(matcher.Description(), Equals("has substring \"taco\"")) +} + +func (t *HasSubstrTest) CandidateIsNil() { + matcher := HasSubstr("") + err := matcher.Matches(nil) + + ExpectThat(err, Error(Equals("which is not a string"))) + ExpectTrue(isFatal(err)) +} + +func (t *HasSubstrTest) CandidateIsInteger() { + matcher := HasSubstr("") + err := matcher.Matches(17) + + ExpectThat(err, Error(Equals("which is not a string"))) + ExpectTrue(isFatal(err)) +} + +func (t *HasSubstrTest) CandidateIsByteSlice() { + matcher := HasSubstr("") + err := matcher.Matches([]byte{17}) + + ExpectThat(err, Error(Equals("which is not a string"))) + ExpectTrue(isFatal(err)) +} + +func (t *HasSubstrTest) CandidateDoesntHaveSubstring() { + matcher := HasSubstr("taco") + err := matcher.Matches("tac") + + ExpectThat(err, Error(Equals(""))) + ExpectFalse(isFatal(err)) +} + +func (t *HasSubstrTest) CandidateEqualsArg() { + matcher := HasSubstr("taco") + err := matcher.Matches("taco") + + ExpectThat(err, Equals(nil)) +} + +func (t *HasSubstrTest) CandidateHasProperSubstring() { + matcher := HasSubstr("taco") + err := matcher.Matches("burritos and tacos") + + ExpectThat(err, Equals(nil)) +} + +func (t *HasSubstrTest) EmptyStringIsAlwaysSubString() { + matcher := HasSubstr("") + err := matcher.Matches("asdf") + + ExpectThat(err, Equals(nil)) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go new file mode 100644 index 0000000000..cc03b214ad --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/identical_to_test.go @@ -0,0 +1,849 @@ +// Copyright 2012 Aaron Jacobs. 
All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . "github.com/smartystreets/assertions/internal/ogletest" + "fmt" + "io" + "unsafe" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type IdenticalToTest struct { +} + +func init() { RegisterTestSuite(&IdenticalToTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *IdenticalToTest) TypesNotIdentical() { + var m Matcher + var err error + + type intAlias int + + // Type alias expected value + m = IdenticalTo(intAlias(17)) + err = m.Matches(int(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int"))) + + // Type alias candidate + m = IdenticalTo(int(17)) + err = m.Matches(intAlias(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.intAlias"))) + + // int and uint + m = IdenticalTo(int(17)) + err = m.Matches(uint(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type uint"))) +} + +func (t *IdenticalToTest) PredeclaredNilIdentifier() { + var m Matcher + var err error + + // Nil literal + m = IdenticalTo(nil) + err = m.Matches(nil) + ExpectEq(nil, err) + + // Zero interface var (which is the same as above since IdenticalTo takes an + // interface{} as an arg) + var nilReader io.Reader + var nilWriter io.Writer + + m = IdenticalTo(nilReader) + err = m.Matches(nilWriter) + ExpectEq(nil, err) + + // Typed nil value. + m = IdenticalTo(nil) + err = m.Matches((chan int)(nil)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type chan int"))) + + // Non-nil value. 
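+ // A candidate holding any dynamic type at all (here a string) can
+ // never be identical to the untyped nil interface value.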
+ m = IdenticalTo(nil) + err = m.Matches("taco") + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type string"))) +} + +func (t *IdenticalToTest) Slices() { + var m Matcher + var err error + + // Nil expected value + m = IdenticalTo(([]int)(nil)) + ExpectEq("identical to <[]int> []", m.Description()) + + err = m.Matches(([]int)(nil)) + ExpectEq(nil, err) + + err = m.Matches([]int{}) + ExpectThat(err, Error(Equals("which is not an identical reference"))) + + // Non-nil expected value + o1 := make([]int, 1) + o2 := make([]int, 1) + m = IdenticalTo(o1) + ExpectEq(fmt.Sprintf("identical to <[]int> %v", o1), m.Description()) + + err = m.Matches(o1) + ExpectEq(nil, err) + + err = m.Matches(o2) + ExpectThat(err, Error(Equals("which is not an identical reference"))) +} + +func (t *IdenticalToTest) Maps() { + var m Matcher + var err error + + // Nil expected value + m = IdenticalTo((map[int]int)(nil)) + ExpectEq("identical to map[]", m.Description()) + + err = m.Matches((map[int]int)(nil)) + ExpectEq(nil, err) + + err = m.Matches(map[int]int{}) + ExpectThat(err, Error(Equals("which is not an identical reference"))) + + // Non-nil expected value + o1 := map[int]int{} + o2 := map[int]int{} + m = IdenticalTo(o1) + ExpectEq(fmt.Sprintf("identical to %v", o1), m.Description()) + + err = m.Matches(o1) + ExpectEq(nil, err) + + err = m.Matches(o2) + ExpectThat(err, Error(Equals("which is not an identical reference"))) +} + +func (t *IdenticalToTest) Functions() { + var m Matcher + var err error + + // Nil expected value + m = IdenticalTo((func())(nil)) + ExpectEq("identical to ", m.Description()) + + err = m.Matches((func())(nil)) + ExpectEq(nil, err) + + err = m.Matches(func(){}) + ExpectThat(err, Error(Equals("which is not an identical reference"))) + + // Non-nil expected value + o1 := func() {} + o2 := func() {} + m = IdenticalTo(o1) + ExpectEq(fmt.Sprintf("identical to %v", o1), m.Description()) + + err = m.Matches(o1) + ExpectEq(nil, err) + + err = m.Matches(o2) + ExpectThat(err, Error(Equals("which is not an identical reference"))) +} + +func (t *IdenticalToTest) Channels() { + var m Matcher + var err error + + // Nil expected value + m = IdenticalTo((chan int)(nil)) + ExpectEq("identical to ", m.Description()) + + err = m.Matches((chan int)(nil)) + ExpectEq(nil, err) + + err = m.Matches(make(chan int)) + ExpectThat(err, Error(Equals("which is not an identical reference"))) + + // Non-nil expected value + o1 := make(chan int) + o2 := make(chan int) + m = IdenticalTo(o1) + ExpectEq(fmt.Sprintf("identical to %v", o1), m.Description()) + + err = m.Matches(o1) + ExpectEq(nil, err) + + err = m.Matches(o2) + ExpectThat(err, Error(Equals("which is not an identical reference"))) +} + +func (t *IdenticalToTest) Bools() { + var m Matcher + var err error + + // false + m = IdenticalTo(false) + ExpectEq("identical to false", m.Description()) + + err = m.Matches(false) + ExpectEq(nil, err) + + err = m.Matches(true) + ExpectThat(err, Error(Equals(""))) + + // true + m = IdenticalTo(true) + ExpectEq("identical to true", m.Description()) + + err = m.Matches(false) + ExpectThat(err, Error(Equals(""))) + + err = m.Matches(true) + ExpectEq(nil, err) +} + +func (t *IdenticalToTest) Ints() { + var m Matcher + var err error + + m = IdenticalTo(int(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(int(17)) + ExpectEq(nil, err) + + // Type alias + type myType int + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is 
of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Int8s() { + var m Matcher + var err error + + m = IdenticalTo(int8(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(int8(17)) + ExpectEq(nil, err) + + // Type alias + type myType int8 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Int16s() { + var m Matcher + var err error + + m = IdenticalTo(int16(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(int16(17)) + ExpectEq(nil, err) + + // Type alias + type myType int16 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Int32s() { + var m Matcher + var err error + + m = IdenticalTo(int32(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(int32(17)) + ExpectEq(nil, err) + + // Type alias + type myType int32 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int16(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int16"))) +} + +func (t *IdenticalToTest) Int64s() { + var m Matcher + var err error + + m = IdenticalTo(int64(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(int64(17)) + ExpectEq(nil, err) + + // Type alias + type myType int64 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Uints() { + var m Matcher + var err error + + m = IdenticalTo(uint(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(uint(17)) + ExpectEq(nil, err) + + // Type alias + type myType uint + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Uint8s() { + var m Matcher + var err error + + m = IdenticalTo(uint8(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(uint8(17)) + ExpectEq(nil, err) + + // Type alias + type myType uint8 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Uint16s() { + var m Matcher + var err error + + m = IdenticalTo(uint16(17)) + 
ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(uint16(17)) + ExpectEq(nil, err) + + // Type alias + type myType uint16 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Uint32s() { + var m Matcher + var err error + + m = IdenticalTo(uint32(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(uint32(17)) + ExpectEq(nil, err) + + // Type alias + type myType uint32 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Uint64s() { + var m Matcher + var err error + + m = IdenticalTo(uint64(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(uint64(17)) + ExpectEq(nil, err) + + // Type alias + type myType uint64 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Uintptrs() { + var m Matcher + var err error + + m = IdenticalTo(uintptr(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(uintptr(17)) + ExpectEq(nil, err) + + // Type alias + type myType uintptr + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Float32s() { + var m Matcher + var err error + + m = IdenticalTo(float32(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(float32(17)) + ExpectEq(nil, err) + + // Type alias + type myType float32 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Float64s() { + var m Matcher + var err error + + m = IdenticalTo(float64(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(float64(17)) + ExpectEq(nil, err) + + // Type alias + type myType float64 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Complex64s() { + var m Matcher + var err error + + m = IdenticalTo(complex64(17)) + ExpectEq("identical to (17+0i)", m.Description()) + + // Identical value + err = m.Matches(complex64(17)) + ExpectEq(nil, err) + + // Type alias + type myType complex64 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, 
Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) Complex128s() { + var m Matcher + var err error + + m = IdenticalTo(complex128(17)) + ExpectEq("identical to (17+0i)", m.Description()) + + // Identical value + err = m.Matches(complex128(17)) + ExpectEq(nil, err) + + // Type alias + type myType complex128 + err = m.Matches(myType(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) EmptyComparableArrays() { + var m Matcher + var err error + + m = IdenticalTo([0]int{}) + ExpectEq("identical to <[0]int> []", m.Description()) + + // Identical value + err = m.Matches([0]int{}) + ExpectEq(nil, err) + + // Length too long + err = m.Matches([1]int{17}) + ExpectThat(err, Error(Equals("which is of type [1]int"))) + + // Element type alias + type myType int + err = m.Matches([0]myType{}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type [0]oglematchers_test.myType"))) + + // Completely wrong element type + err = m.Matches([0]int32{}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type [0]int32"))) +} + +func (t *IdenticalToTest) NonEmptyComparableArrays() { + var m Matcher + var err error + + m = IdenticalTo([2]int{17, 19}) + ExpectEq("identical to <[2]int> [17 19]", m.Description()) + + // Identical value + err = m.Matches([2]int{17, 19}) + ExpectEq(nil, err) + + // Length too short + err = m.Matches([1]int{17}) + ExpectThat(err, Error(Equals("which is of type [1]int"))) + + // Length too long + err = m.Matches([3]int{17, 19, 23}) + ExpectThat(err, Error(Equals("which is of type [3]int"))) + + // First element different + err = m.Matches([2]int{13, 19}) + ExpectThat(err, Error(Equals(""))) + + // Second element different + err = m.Matches([2]int{17, 23}) + ExpectThat(err, Error(Equals(""))) + + // Element type alias + type myType int + err = m.Matches([2]myType{17, 19}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type [2]oglematchers_test.myType"))) + + // Completely wrong element type + err = m.Matches([2]int32{17, 19}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type [2]int32"))) +} + +func (t *IdenticalToTest) NonEmptyArraysOfComparableArrays() { + var m Matcher + var err error + + x := [2][2]int{ + [2]int{17, 19}, + [2]int{23, 29}, + } + m = IdenticalTo(x) + ExpectEq("identical to <[2][2]int> [[17 19] [23 29]]", m.Description()) + + // Identical value + err = m.Matches([2][2]int{[2]int{17, 19}, [2]int{23, 29}}) + ExpectEq(nil, err) + + // Outer length too short + err = m.Matches([1][2]int{[2]int{17, 19}}) + ExpectThat(err, Error(Equals("which is of type [1][2]int"))) + + // Inner length too short + err = m.Matches([2][1]int{[1]int{17}, [1]int{23}}) + ExpectThat(err, Error(Equals("which is of type [2][1]int"))) + + // First element different + err = m.Matches([2][2]int{[2]int{13, 19}, [2]int{23, 29}}) + ExpectThat(err, Error(Equals(""))) + + // Element type alias + type myType int + err = m.Matches([2][2]myType{[2]myType{17, 19}, [2]myType{23, 29}}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type [2][2]oglematchers_test.myType"))) +} + +func (t 
*IdenticalToTest) NonComparableArrays() { + x := [0]func(){} + f := func() { IdenticalTo(x) } + ExpectThat(f, Panics(HasSubstr("is not comparable"))) +} + +func (t *IdenticalToTest) ArraysOfNonComparableArrays() { + x := [0][0]func(){} + f := func() { IdenticalTo(x) } + ExpectThat(f, Panics(HasSubstr("is not comparable"))) +} + +func (t *IdenticalToTest) Strings() { + var m Matcher + var err error + + m = IdenticalTo("taco") + ExpectEq("identical to taco", m.Description()) + + // Identical value + err = m.Matches("ta" + "co") + ExpectEq(nil, err) + + // Type alias + type myType string + err = m.Matches(myType("taco")) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) ComparableStructs() { + var m Matcher + var err error + + type subStruct struct { + i int + } + + type myStruct struct { + u uint + s subStruct + } + + x := myStruct{17, subStruct{19}} + m = IdenticalTo(x) + ExpectEq("identical to {17 {19}}", m.Description()) + + // Identical value + err = m.Matches(myStruct{17, subStruct{19}}) + ExpectEq(nil, err) + + // Wrong outer field + err = m.Matches(myStruct{13, subStruct{19}}) + ExpectThat(err, Error(Equals(""))) + + // Wrong inner field + err = m.Matches(myStruct{17, subStruct{23}}) + ExpectThat(err, Error(Equals(""))) + + // Type alias + type myType myStruct + err = m.Matches(myType{17, subStruct{19}}) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) NonComparableStructs() { + type subStruct struct { + s []int + } + + type myStruct struct { + u uint + s subStruct + } + + x := myStruct{17, subStruct{[]int{19}}} + f := func() { IdenticalTo(x) } + ExpectThat(f, Panics(AllOf(HasSubstr("IdenticalTo"), HasSubstr("comparable")))) +} + +func (t *IdenticalToTest) NilUnsafePointer() { + var m Matcher + var err error + + x := unsafe.Pointer(nil) + m = IdenticalTo(x) + ExpectEq(fmt.Sprintf("identical to %v", x), m.Description()) + + // Identical value + err = m.Matches(unsafe.Pointer(nil)) + ExpectEq(nil, err) + + // Wrong value + j := 17 + err = m.Matches(unsafe.Pointer(&j)) + ExpectThat(err, Error(Equals(""))) + + // Type alias + type myType unsafe.Pointer + err = m.Matches(myType(unsafe.Pointer(nil))) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) NonNilUnsafePointer() { + var m Matcher + var err error + + i := 17 + x := unsafe.Pointer(&i) + m = IdenticalTo(x) + ExpectEq(fmt.Sprintf("identical to %v", x), m.Description()) + + // Identical value + err = m.Matches(unsafe.Pointer(&i)) + ExpectEq(nil, err) + + // Nil value + err = m.Matches(unsafe.Pointer(nil)) + ExpectThat(err, Error(Equals(""))) + + // Wrong value + j := 17 + err = m.Matches(unsafe.Pointer(&j)) + ExpectThat(err, Error(Equals(""))) + + // Type alias + type myType unsafe.Pointer + err = m.Matches(myType(unsafe.Pointer(&i))) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type oglematchers_test.myType"))) + + // Completely 
wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} + +func (t *IdenticalToTest) IntAlias() { + var m Matcher + var err error + + type intAlias int + + m = IdenticalTo(intAlias(17)) + ExpectEq("identical to 17", m.Description()) + + // Identical value + err = m.Matches(intAlias(17)) + ExpectEq(nil, err) + + // Int + err = m.Matches(int(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int"))) + + // Completely wrong type + err = m.Matches(int32(17)) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("which is of type int32"))) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go new file mode 100644 index 0000000000..a1a2ae7d60 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_or_equal_test.go @@ -0,0 +1,1077 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "math" + + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type LessOrEqualTest struct { +} + +func init() { RegisterTestSuite(&LessOrEqualTest{}) } + +type leTestCase struct { + candidate interface{} + expectedResult bool + shouldBeFatal bool + expectedError string +} + +func (t *LessOrEqualTest) checkTestCases(matcher Matcher, cases []leTestCase) { + for i, c := range cases { + err := matcher.Matches(c.candidate) + + ExpectThat( + (err == nil), + Equals(c.expectedResult), + "Case %d (candidate %v)", + i, + c.candidate) + + if err == nil { + continue + } + + _, isFatal := err.(*FatalError) + ExpectEq( + c.shouldBeFatal, + isFatal, + "Case %d (candidate %v)", + i, + c.candidate) + + ExpectThat( + err, + Error(Equals(c.expectedError)), + "Case %d (candidate %v)", + i, + c.candidate) + } +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessOrEqualTest) IntegerCandidateBadTypes() { + matcher := LessOrEqual(int(-150)) + + cases := []leTestCase{ + leTestCase{true, false, true, "which is not comparable"}, + leTestCase{complex64(-151), false, true, "which is not comparable"}, + leTestCase{complex128(-151), false, true, "which is not comparable"}, + leTestCase{[...]int{-151}, false, true, "which is not comparable"}, + leTestCase{make(chan int), false, true, "which is not comparable"}, + leTestCase{func() {}, false, true, "which is not comparable"}, + leTestCase{map[int]int{}, false, true, "which is not comparable"}, + leTestCase{&leTestCase{}, false, true, "which is not comparable"}, + leTestCase{make([]int, 0), false, true, "which is not comparable"}, + leTestCase{"-151", false, true, "which is not comparable"}, + leTestCase{leTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) FloatCandidateBadTypes() { + matcher := LessOrEqual(float32(-150)) + + cases := []leTestCase{ + leTestCase{true, false, true, "which is not comparable"}, + leTestCase{complex64(-151), false, true, "which is not comparable"}, + leTestCase{complex128(-151), false, true, "which is not comparable"}, + leTestCase{[...]int{-151}, false, true, "which is not comparable"}, + leTestCase{make(chan int), false, true, "which is not comparable"}, + leTestCase{func() {}, false, true, "which is not comparable"}, + leTestCase{map[int]int{}, false, true, "which is not comparable"}, + leTestCase{&leTestCase{}, false, true, "which is not comparable"}, + leTestCase{make([]int, 0), false, true, "which is not comparable"}, + leTestCase{"-151", false, true, "which is not comparable"}, + leTestCase{leTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) StringCandidateBadTypes() { + matcher := LessOrEqual("17") + + cases := []leTestCase{ + leTestCase{true, false, true, "which is not comparable"}, + leTestCase{int(0), false, true, "which is not comparable"}, + leTestCase{int8(0), false, true, "which is not comparable"}, + leTestCase{int16(0), false, true, "which is not comparable"}, + leTestCase{int32(0), false, true, "which is not comparable"}, + leTestCase{int64(0), false, true, "which is not comparable"}, + leTestCase{uint(0), false, true, "which is not comparable"}, + leTestCase{uint8(0), false, true, 
"which is not comparable"}, + leTestCase{uint16(0), false, true, "which is not comparable"}, + leTestCase{uint32(0), false, true, "which is not comparable"}, + leTestCase{uint64(0), false, true, "which is not comparable"}, + leTestCase{float32(0), false, true, "which is not comparable"}, + leTestCase{float64(0), false, true, "which is not comparable"}, + leTestCase{complex64(-151), false, true, "which is not comparable"}, + leTestCase{complex128(-151), false, true, "which is not comparable"}, + leTestCase{[...]int{-151}, false, true, "which is not comparable"}, + leTestCase{make(chan int), false, true, "which is not comparable"}, + leTestCase{func() {}, false, true, "which is not comparable"}, + leTestCase{map[int]int{}, false, true, "which is not comparable"}, + leTestCase{&leTestCase{}, false, true, "which is not comparable"}, + leTestCase{make([]int, 0), false, true, "which is not comparable"}, + leTestCase{leTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) BadArgument() { + panicked := false + + defer func() { + ExpectThat(panicked, Equals(true)) + }() + + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + LessOrEqual(complex128(0)) +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessOrEqualTest) NegativeIntegerLiteral() { + matcher := LessOrEqual(-150) + desc := matcher.Description() + expectedDesc := "less than or equal to -150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-(1 << 30), true, false, ""}, + leTestCase{-151, true, false, ""}, + leTestCase{-150, true, false, ""}, + leTestCase{-149, false, false, ""}, + leTestCase{0, false, false, ""}, + leTestCase{17, false, false, ""}, + + leTestCase{int(-(1 << 30)), true, false, ""}, + leTestCase{int(-151), true, false, ""}, + leTestCase{int(-150), true, false, ""}, + leTestCase{int(-149), false, false, ""}, + leTestCase{int(0), false, false, ""}, + leTestCase{int(17), false, false, ""}, + + leTestCase{int8(-127), false, false, ""}, + leTestCase{int8(0), false, false, ""}, + leTestCase{int8(17), false, false, ""}, + + leTestCase{int16(-(1 << 14)), true, false, ""}, + leTestCase{int16(-151), true, false, ""}, + leTestCase{int16(-150), true, false, ""}, + leTestCase{int16(-149), false, false, ""}, + leTestCase{int16(0), false, false, ""}, + leTestCase{int16(17), false, false, ""}, + + leTestCase{int32(-(1 << 30)), true, false, ""}, + leTestCase{int32(-151), true, false, ""}, + leTestCase{int32(-150), true, false, ""}, + leTestCase{int32(-149), false, false, ""}, + leTestCase{int32(0), false, false, ""}, + leTestCase{int32(17), false, false, ""}, + + leTestCase{int64(-(1 << 30)), true, false, ""}, + leTestCase{int64(-151), true, false, ""}, + leTestCase{int64(-150), true, false, ""}, + leTestCase{int64(-149), false, false, ""}, + leTestCase{int64(0), false, false, ""}, + leTestCase{int64(17), false, false, ""}, + + // Unsigned integers. 
+ leTestCase{uint((1 << 32) - 151), false, false, ""}, + leTestCase{uint(0), false, false, ""}, + leTestCase{uint(17), false, false, ""}, + + leTestCase{uint8(0), false, false, ""}, + leTestCase{uint8(17), false, false, ""}, + leTestCase{uint8(253), false, false, ""}, + + leTestCase{uint16((1 << 16) - 151), false, false, ""}, + leTestCase{uint16(0), false, false, ""}, + leTestCase{uint16(17), false, false, ""}, + + leTestCase{uint32((1 << 32) - 151), false, false, ""}, + leTestCase{uint32(0), false, false, ""}, + leTestCase{uint32(17), false, false, ""}, + + leTestCase{uint64((1 << 64) - 151), false, false, ""}, + leTestCase{uint64(0), false, false, ""}, + leTestCase{uint64(17), false, false, ""}, + + // Floating point. + leTestCase{float32(-(1 << 30)), true, false, ""}, + leTestCase{float32(-151), true, false, ""}, + leTestCase{float32(-150.1), true, false, ""}, + leTestCase{float32(-150), true, false, ""}, + leTestCase{float32(-149.9), false, false, ""}, + leTestCase{float32(0), false, false, ""}, + leTestCase{float32(17), false, false, ""}, + leTestCase{float32(160), false, false, ""}, + + leTestCase{float64(-(1 << 30)), true, false, ""}, + leTestCase{float64(-151), true, false, ""}, + leTestCase{float64(-150.1), true, false, ""}, + leTestCase{float64(-150), true, false, ""}, + leTestCase{float64(-149.9), false, false, ""}, + leTestCase{float64(0), false, false, ""}, + leTestCase{float64(17), false, false, ""}, + leTestCase{float64(160), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) ZeroIntegerLiteral() { + matcher := LessOrEqual(0) + desc := matcher.Description() + expectedDesc := "less than or equal to 0" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-(1 << 30), true, false, ""}, + leTestCase{-1, true, false, ""}, + leTestCase{0, true, false, ""}, + leTestCase{1, false, false, ""}, + leTestCase{17, false, false, ""}, + leTestCase{(1 << 30), false, false, ""}, + + leTestCase{int(-(1 << 30)), true, false, ""}, + leTestCase{int(-1), true, false, ""}, + leTestCase{int(0), true, false, ""}, + leTestCase{int(1), false, false, ""}, + leTestCase{int(17), false, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(0), true, false, ""}, + leTestCase{int8(1), false, false, ""}, + + leTestCase{int16(-(1 << 14)), true, false, ""}, + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(0), true, false, ""}, + leTestCase{int16(1), false, false, ""}, + leTestCase{int16(17), false, false, ""}, + + leTestCase{int32(-(1 << 30)), true, false, ""}, + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(0), true, false, ""}, + leTestCase{int32(1), false, false, ""}, + leTestCase{int32(17), false, false, ""}, + + leTestCase{int64(-(1 << 30)), true, false, ""}, + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(0), true, false, ""}, + leTestCase{int64(1), false, false, ""}, + leTestCase{int64(17), false, false, ""}, + + // Unsigned integers. 
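+ // Among the unsigned rows only zero itself satisfies <= 0.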
+ leTestCase{uint((1 << 32) - 1), false, false, ""}, + leTestCase{uint(0), true, false, ""}, + leTestCase{uint(1), false, false, ""}, + leTestCase{uint(17), false, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(1), false, false, ""}, + leTestCase{uint8(17), false, false, ""}, + leTestCase{uint8(253), false, false, ""}, + + leTestCase{uint16((1 << 16) - 1), false, false, ""}, + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(1), false, false, ""}, + leTestCase{uint16(17), false, false, ""}, + + leTestCase{uint32((1 << 32) - 1), false, false, ""}, + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(1), false, false, ""}, + leTestCase{uint32(17), false, false, ""}, + + leTestCase{uint64((1 << 64) - 1), false, false, ""}, + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(1), false, false, ""}, + leTestCase{uint64(17), false, false, ""}, + + // Floating point. + leTestCase{float32(-(1 << 30)), true, false, ""}, + leTestCase{float32(-1), true, false, ""}, + leTestCase{float32(-0.1), true, false, ""}, + leTestCase{float32(-0.0), true, false, ""}, + leTestCase{float32(0), true, false, ""}, + leTestCase{float32(0.1), false, false, ""}, + leTestCase{float32(17), false, false, ""}, + leTestCase{float32(160), false, false, ""}, + + leTestCase{float64(-(1 << 30)), true, false, ""}, + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(-0.1), true, false, ""}, + leTestCase{float64(-0), true, false, ""}, + leTestCase{float64(0), true, false, ""}, + leTestCase{float64(0.1), false, false, ""}, + leTestCase{float64(17), false, false, ""}, + leTestCase{float64(160), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) PositiveIntegerLiteral() { + matcher := LessOrEqual(150) + desc := matcher.Description() + expectedDesc := "less than or equal to 150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-1, true, false, ""}, + leTestCase{149, true, false, ""}, + leTestCase{150, true, false, ""}, + leTestCase{151, false, false, ""}, + + leTestCase{int(-1), true, false, ""}, + leTestCase{int(149), true, false, ""}, + leTestCase{int(150), true, false, ""}, + leTestCase{int(151), false, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(0), true, false, ""}, + leTestCase{int8(17), true, false, ""}, + leTestCase{int8(127), true, false, ""}, + + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(149), true, false, ""}, + leTestCase{int16(150), true, false, ""}, + leTestCase{int16(151), false, false, ""}, + + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(149), true, false, ""}, + leTestCase{int32(150), true, false, ""}, + leTestCase{int32(151), false, false, ""}, + + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(149), true, false, ""}, + leTestCase{int64(150), true, false, ""}, + leTestCase{int64(151), false, false, ""}, + + // Unsigned integers. 
+ leTestCase{uint(0), true, false, ""}, + leTestCase{uint(149), true, false, ""}, + leTestCase{uint(150), true, false, ""}, + leTestCase{uint(151), false, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(127), true, false, ""}, + + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(149), true, false, ""}, + leTestCase{uint16(150), true, false, ""}, + leTestCase{uint16(151), false, false, ""}, + + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(149), true, false, ""}, + leTestCase{uint32(150), true, false, ""}, + leTestCase{uint32(151), false, false, ""}, + + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(149), true, false, ""}, + leTestCase{uint64(150), true, false, ""}, + leTestCase{uint64(151), false, false, ""}, + + // Floating point. + leTestCase{float32(-1), true, false, ""}, + leTestCase{float32(149), true, false, ""}, + leTestCase{float32(149.9), true, false, ""}, + leTestCase{float32(150), true, false, ""}, + leTestCase{float32(150.1), false, false, ""}, + leTestCase{float32(151), false, false, ""}, + + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(149), true, false, ""}, + leTestCase{float64(149.9), true, false, ""}, + leTestCase{float64(150), true, false, ""}, + leTestCase{float64(150.1), false, false, ""}, + leTestCase{float64(151), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Float literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessOrEqualTest) NegativeFloatLiteral() { + matcher := LessOrEqual(-150.1) + desc := matcher.Description() + expectedDesc := "less than or equal to -150.1" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-(1 << 30), true, false, ""}, + leTestCase{-151, true, false, ""}, + leTestCase{-150.1, true, false, ""}, + leTestCase{-150, false, false, ""}, + leTestCase{-149, false, false, ""}, + leTestCase{0, false, false, ""}, + leTestCase{17, false, false, ""}, + + leTestCase{int(-(1 << 30)), true, false, ""}, + leTestCase{int(-151), true, false, ""}, + leTestCase{int(-150), false, false, ""}, + leTestCase{int(-149), false, false, ""}, + leTestCase{int(0), false, false, ""}, + leTestCase{int(17), false, false, ""}, + + leTestCase{int8(-127), false, false, ""}, + leTestCase{int8(0), false, false, ""}, + leTestCase{int8(17), false, false, ""}, + + leTestCase{int16(-(1 << 14)), true, false, ""}, + leTestCase{int16(-151), true, false, ""}, + leTestCase{int16(-150), false, false, ""}, + leTestCase{int16(-149), false, false, ""}, + leTestCase{int16(0), false, false, ""}, + leTestCase{int16(17), false, false, ""}, + + leTestCase{int32(-(1 << 30)), true, false, ""}, + leTestCase{int32(-151), true, false, ""}, + leTestCase{int32(-150), false, false, ""}, + leTestCase{int32(-149), false, false, ""}, + leTestCase{int32(0), false, false, ""}, + leTestCase{int32(17), false, false, ""}, + + leTestCase{int64(-(1 << 30)), true, false, ""}, + leTestCase{int64(-151), true, false, ""}, + leTestCase{int64(-150), false, false, ""}, + leTestCase{int64(-149), false, false, ""}, + leTestCase{int64(0), false, false, ""}, + leTestCase{int64(17), false, false, ""}, + + // Unsigned integers. 
+ leTestCase{uint((1 << 32) - 151), false, false, ""}, + leTestCase{uint(0), false, false, ""}, + leTestCase{uint(17), false, false, ""}, + + leTestCase{uint8(0), false, false, ""}, + leTestCase{uint8(17), false, false, ""}, + leTestCase{uint8(253), false, false, ""}, + + leTestCase{uint16((1 << 16) - 151), false, false, ""}, + leTestCase{uint16(0), false, false, ""}, + leTestCase{uint16(17), false, false, ""}, + + leTestCase{uint32((1 << 32) - 151), false, false, ""}, + leTestCase{uint32(0), false, false, ""}, + leTestCase{uint32(17), false, false, ""}, + + leTestCase{uint64((1 << 64) - 151), false, false, ""}, + leTestCase{uint64(0), false, false, ""}, + leTestCase{uint64(17), false, false, ""}, + + // Floating point. + leTestCase{float32(-(1 << 30)), true, false, ""}, + leTestCase{float32(-151), true, false, ""}, + leTestCase{float32(-150.2), true, false, ""}, + leTestCase{float32(-150.1), true, false, ""}, + leTestCase{float32(-150), false, false, ""}, + leTestCase{float32(0), false, false, ""}, + leTestCase{float32(17), false, false, ""}, + leTestCase{float32(160), false, false, ""}, + + leTestCase{float64(-(1 << 30)), true, false, ""}, + leTestCase{float64(-151), true, false, ""}, + leTestCase{float64(-150.2), true, false, ""}, + leTestCase{float64(-150.1), true, false, ""}, + leTestCase{float64(-150), false, false, ""}, + leTestCase{float64(0), false, false, ""}, + leTestCase{float64(17), false, false, ""}, + leTestCase{float64(160), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) PositiveFloatLiteral() { + matcher := LessOrEqual(149.9) + desc := matcher.Description() + expectedDesc := "less than or equal to 149.9" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-1, true, false, ""}, + leTestCase{149, true, false, ""}, + leTestCase{149.9, true, false, ""}, + leTestCase{150, false, false, ""}, + leTestCase{151, false, false, ""}, + + leTestCase{int(-1), true, false, ""}, + leTestCase{int(149), true, false, ""}, + leTestCase{int(150), false, false, ""}, + leTestCase{int(151), false, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(0), true, false, ""}, + leTestCase{int8(17), true, false, ""}, + leTestCase{int8(127), true, false, ""}, + + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(149), true, false, ""}, + leTestCase{int16(150), false, false, ""}, + leTestCase{int16(151), false, false, ""}, + + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(149), true, false, ""}, + leTestCase{int32(150), false, false, ""}, + leTestCase{int32(151), false, false, ""}, + + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(149), true, false, ""}, + leTestCase{int64(150), false, false, ""}, + leTestCase{int64(151), false, false, ""}, + + // Unsigned integers. 
+ leTestCase{uint(0), true, false, ""}, + leTestCase{uint(149), true, false, ""}, + leTestCase{uint(150), false, false, ""}, + leTestCase{uint(151), false, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(127), true, false, ""}, + + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(149), true, false, ""}, + leTestCase{uint16(150), false, false, ""}, + leTestCase{uint16(151), false, false, ""}, + + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(149), true, false, ""}, + leTestCase{uint32(150), false, false, ""}, + leTestCase{uint32(151), false, false, ""}, + + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(149), true, false, ""}, + leTestCase{uint64(150), false, false, ""}, + leTestCase{uint64(151), false, false, ""}, + + // Floating point. + leTestCase{float32(-1), true, false, ""}, + leTestCase{float32(149), true, false, ""}, + leTestCase{float32(149.8), true, false, ""}, + leTestCase{float32(149.9), true, false, ""}, + leTestCase{float32(150), false, false, ""}, + leTestCase{float32(151), false, false, ""}, + + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(149), true, false, ""}, + leTestCase{float64(149.8), true, false, ""}, + leTestCase{float64(149.9), true, false, ""}, + leTestCase{float64(150), false, false, ""}, + leTestCase{float64(151), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Subtle cases +//////////////////////////////////////////////////////////////////////// + +func (t *LessOrEqualTest) Int64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := LessOrEqual(int64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "less than or equal to 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-1, true, false, ""}, + leTestCase{kTwoTo25 + 0, true, false, ""}, + leTestCase{kTwoTo25 + 1, true, false, ""}, + leTestCase{kTwoTo25 + 2, false, false, ""}, + + leTestCase{int(-1), true, false, ""}, + leTestCase{int(kTwoTo25 + 0), true, false, ""}, + leTestCase{int(kTwoTo25 + 1), true, false, ""}, + leTestCase{int(kTwoTo25 + 2), false, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(127), true, false, ""}, + + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(0), true, false, ""}, + leTestCase{int16(32767), true, false, ""}, + + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(kTwoTo25 + 0), true, false, ""}, + leTestCase{int32(kTwoTo25 + 1), true, false, ""}, + leTestCase{int32(kTwoTo25 + 2), false, false, ""}, + + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(kTwoTo25 + 0), true, false, ""}, + leTestCase{int64(kTwoTo25 + 1), true, false, ""}, + leTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + // Unsigned integers. 
+ leTestCase{uint(0), true, false, ""}, + leTestCase{uint(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint(kTwoTo25 + 2), false, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(255), true, false, ""}, + + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(65535), true, false, ""}, + + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint32(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint32(kTwoTo25 + 2), false, false, ""}, + + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Floating point. + leTestCase{float32(-1), true, false, ""}, + leTestCase{float32(kTwoTo25 - 2), true, false, ""}, + leTestCase{float32(kTwoTo25 - 1), true, false, ""}, + leTestCase{float32(kTwoTo25 + 0), true, false, ""}, + leTestCase{float32(kTwoTo25 + 1), true, false, ""}, + leTestCase{float32(kTwoTo25 + 2), true, false, ""}, + leTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(kTwoTo25 - 2), true, false, ""}, + leTestCase{float64(kTwoTo25 - 1), true, false, ""}, + leTestCase{float64(kTwoTo25 + 0), true, false, ""}, + leTestCase{float64(kTwoTo25 + 1), true, false, ""}, + leTestCase{float64(kTwoTo25 + 2), false, false, ""}, + leTestCase{float64(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) Int64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := LessOrEqual(int64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "less than or equal to 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-1, true, false, ""}, + leTestCase{1 << 30, true, false, ""}, + + leTestCase{int(-1), true, false, ""}, + leTestCase{int(math.MaxInt32), true, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(127), true, false, ""}, + + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(0), true, false, ""}, + leTestCase{int16(32767), true, false, ""}, + + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(math.MaxInt32), true, false, ""}, + + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(kTwoTo54 - 1), true, false, ""}, + leTestCase{int64(kTwoTo54 + 0), true, false, ""}, + leTestCase{int64(kTwoTo54 + 1), true, false, ""}, + leTestCase{int64(kTwoTo54 + 2), false, false, ""}, + + // Unsigned integers. 
+ leTestCase{uint(0), true, false, ""}, + leTestCase{uint(math.MaxUint32), true, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(255), true, false, ""}, + + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(65535), true, false, ""}, + + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(math.MaxUint32), true, false, ""}, + + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + + // Floating point. + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(kTwoTo54 - 2), true, false, ""}, + leTestCase{float64(kTwoTo54 - 1), true, false, ""}, + leTestCase{float64(kTwoTo54 + 0), true, false, ""}, + leTestCase{float64(kTwoTo54 + 1), true, false, ""}, + leTestCase{float64(kTwoTo54 + 2), true, false, ""}, + leTestCase{float64(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) Uint64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := LessOrEqual(uint64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "less than or equal to 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-1, true, false, ""}, + leTestCase{kTwoTo25 + 0, true, false, ""}, + leTestCase{kTwoTo25 + 1, true, false, ""}, + leTestCase{kTwoTo25 + 2, false, false, ""}, + + leTestCase{int(-1), true, false, ""}, + leTestCase{int(kTwoTo25 + 0), true, false, ""}, + leTestCase{int(kTwoTo25 + 1), true, false, ""}, + leTestCase{int(kTwoTo25 + 2), false, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(127), true, false, ""}, + + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(0), true, false, ""}, + leTestCase{int16(32767), true, false, ""}, + + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(kTwoTo25 + 0), true, false, ""}, + leTestCase{int32(kTwoTo25 + 1), true, false, ""}, + leTestCase{int32(kTwoTo25 + 2), false, false, ""}, + + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(kTwoTo25 + 0), true, false, ""}, + leTestCase{int64(kTwoTo25 + 1), true, false, ""}, + leTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + // Unsigned integers. + leTestCase{uint(0), true, false, ""}, + leTestCase{uint(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint(kTwoTo25 + 2), false, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(255), true, false, ""}, + + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(65535), true, false, ""}, + + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint32(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint32(kTwoTo25 + 2), false, false, ""}, + + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Floating point. 
+ leTestCase{float32(-1), true, false, ""}, + leTestCase{float32(kTwoTo25 - 2), true, false, ""}, + leTestCase{float32(kTwoTo25 - 1), true, false, ""}, + leTestCase{float32(kTwoTo25 + 0), true, false, ""}, + leTestCase{float32(kTwoTo25 + 1), true, false, ""}, + leTestCase{float32(kTwoTo25 + 2), true, false, ""}, + leTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(kTwoTo25 - 2), true, false, ""}, + leTestCase{float64(kTwoTo25 - 1), true, false, ""}, + leTestCase{float64(kTwoTo25 + 0), true, false, ""}, + leTestCase{float64(kTwoTo25 + 1), true, false, ""}, + leTestCase{float64(kTwoTo25 + 2), false, false, ""}, + leTestCase{float64(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) Uint64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := LessOrEqual(uint64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "less than or equal to 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{-1, true, false, ""}, + leTestCase{1 << 30, true, false, ""}, + + leTestCase{int(-1), true, false, ""}, + leTestCase{int(math.MaxInt32), true, false, ""}, + + leTestCase{int8(-1), true, false, ""}, + leTestCase{int8(127), true, false, ""}, + + leTestCase{int16(-1), true, false, ""}, + leTestCase{int16(0), true, false, ""}, + leTestCase{int16(32767), true, false, ""}, + + leTestCase{int32(-1), true, false, ""}, + leTestCase{int32(math.MaxInt32), true, false, ""}, + + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(kTwoTo54 - 1), true, false, ""}, + leTestCase{int64(kTwoTo54 + 0), true, false, ""}, + leTestCase{int64(kTwoTo54 + 1), true, false, ""}, + leTestCase{int64(kTwoTo54 + 2), false, false, ""}, + + // Unsigned integers. + leTestCase{uint(0), true, false, ""}, + leTestCase{uint(math.MaxUint32), true, false, ""}, + + leTestCase{uint8(0), true, false, ""}, + leTestCase{uint8(255), true, false, ""}, + + leTestCase{uint16(0), true, false, ""}, + leTestCase{uint16(65535), true, false, ""}, + + leTestCase{uint32(0), true, false, ""}, + leTestCase{uint32(math.MaxUint32), true, false, ""}, + + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + + // Floating point. + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(kTwoTo54 - 2), true, false, ""}, + leTestCase{float64(kTwoTo54 - 1), true, false, ""}, + leTestCase{float64(kTwoTo54 + 0), true, false, ""}, + leTestCase{float64(kTwoTo54 + 1), true, false, ""}, + leTestCase{float64(kTwoTo54 + 2), true, false, ""}, + leTestCase{float64(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) Float32AboveExactIntegerRange() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. 
+ const kTwoTo25 = 1 << 25 + matcher := LessOrEqual(float32(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "less than or equal to 3.3554432e+07" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(kTwoTo25 - 2), true, false, ""}, + leTestCase{int64(kTwoTo25 - 1), true, false, ""}, + leTestCase{int64(kTwoTo25 + 0), true, false, ""}, + leTestCase{int64(kTwoTo25 + 1), true, false, ""}, + leTestCase{int64(kTwoTo25 + 2), true, false, ""}, + leTestCase{int64(kTwoTo25 + 3), false, false, ""}, + + // Unsigned integers. + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(kTwoTo25 - 2), true, false, ""}, + leTestCase{uint64(kTwoTo25 - 1), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 1), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 2), true, false, ""}, + leTestCase{uint64(kTwoTo25 + 3), false, false, ""}, + + // Floating point. + leTestCase{float32(-1), true, false, ""}, + leTestCase{float32(kTwoTo25 - 2), true, false, ""}, + leTestCase{float32(kTwoTo25 - 1), true, false, ""}, + leTestCase{float32(kTwoTo25 + 0), true, false, ""}, + leTestCase{float32(kTwoTo25 + 1), true, false, ""}, + leTestCase{float32(kTwoTo25 + 2), true, false, ""}, + leTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(kTwoTo25 - 2), true, false, ""}, + leTestCase{float64(kTwoTo25 - 1), true, false, ""}, + leTestCase{float64(kTwoTo25 + 0), true, false, ""}, + leTestCase{float64(kTwoTo25 + 1), true, false, ""}, + leTestCase{float64(kTwoTo25 + 2), true, false, ""}, + leTestCase{float64(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) Float64AboveExactIntegerRange() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := LessOrEqual(float64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "less than or equal to 1.8014398509481984e+16" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + // Signed integers. + leTestCase{int64(-1), true, false, ""}, + leTestCase{int64(kTwoTo54 - 2), true, false, ""}, + leTestCase{int64(kTwoTo54 - 1), true, false, ""}, + leTestCase{int64(kTwoTo54 + 0), true, false, ""}, + leTestCase{int64(kTwoTo54 + 1), true, false, ""}, + leTestCase{int64(kTwoTo54 + 2), true, false, ""}, + leTestCase{int64(kTwoTo54 + 3), false, false, ""}, + + // Unsigned integers. + leTestCase{uint64(0), true, false, ""}, + leTestCase{uint64(kTwoTo54 - 2), true, false, ""}, + leTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 1), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 2), true, false, ""}, + leTestCase{uint64(kTwoTo54 + 3), false, false, ""}, + + // Floating point. 
+ leTestCase{float64(-1), true, false, ""}, + leTestCase{float64(kTwoTo54 - 2), true, false, ""}, + leTestCase{float64(kTwoTo54 - 1), true, false, ""}, + leTestCase{float64(kTwoTo54 + 0), true, false, ""}, + leTestCase{float64(kTwoTo54 + 1), true, false, ""}, + leTestCase{float64(kTwoTo54 + 2), true, false, ""}, + leTestCase{float64(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// String literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessOrEqualTest) EmptyString() { + matcher := LessOrEqual("") + desc := matcher.Description() + expectedDesc := "less than or equal to \"\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + leTestCase{"", true, false, ""}, + leTestCase{"\x00", false, false, ""}, + leTestCase{"a", false, false, ""}, + leTestCase{"foo", false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) SingleNullByte() { + matcher := LessOrEqual("\x00") + desc := matcher.Description() + expectedDesc := "less than or equal to \"\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + leTestCase{"", true, false, ""}, + leTestCase{"\x00", true, false, ""}, + leTestCase{"\x00\x00", false, false, ""}, + leTestCase{"a", false, false, ""}, + leTestCase{"foo", false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessOrEqualTest) LongerString() { + matcher := LessOrEqual("foo\x00") + desc := matcher.Description() + expectedDesc := "less than or equal to \"foo\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []leTestCase{ + leTestCase{"", true, false, ""}, + leTestCase{"\x00", true, false, ""}, + leTestCase{"bar", true, false, ""}, + leTestCase{"foo", true, false, ""}, + leTestCase{"foo\x00", true, false, ""}, + leTestCase{"foo\x00\x00", false, false, ""}, + leTestCase{"fooa", false, false, ""}, + leTestCase{"qux", false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go new file mode 100644 index 0000000000..59f5b7f56b --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/less_than_test.go @@ -0,0 +1,1057 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "math" + + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type LessThanTest struct { +} + +func init() { RegisterTestSuite(&LessThanTest{}) } + +type ltTestCase struct { + candidate interface{} + expectedResult bool + shouldBeFatal bool + expectedError string +} + +func (t *LessThanTest) checkTestCases(matcher Matcher, cases []ltTestCase) { + for i, c := range cases { + err := matcher.Matches(c.candidate) + + ExpectThat( + (err == nil), + Equals(c.expectedResult), + "Case %d (candidate %v)", + i, + c.candidate) + + if err == nil { + continue + } + + _, isFatal := err.(*FatalError) + ExpectEq( + c.shouldBeFatal, + isFatal, + "Case %d (candidate %v)", + i, + c.candidate) + + ExpectThat( + err, + Error(Equals(c.expectedError)), + "Case %d (candidate %v)", + i, + c.candidate) + } +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessThanTest) IntegerCandidateBadTypes() { + matcher := LessThan(int(-150)) + + cases := []ltTestCase{ + ltTestCase{true, false, true, "which is not comparable"}, + ltTestCase{complex64(-151), false, true, "which is not comparable"}, + ltTestCase{complex128(-151), false, true, "which is not comparable"}, + ltTestCase{[...]int{-151}, false, true, "which is not comparable"}, + ltTestCase{make(chan int), false, true, "which is not comparable"}, + ltTestCase{func() {}, false, true, "which is not comparable"}, + ltTestCase{map[int]int{}, false, true, "which is not comparable"}, + ltTestCase{<TestCase{}, false, true, "which is not comparable"}, + ltTestCase{make([]int, 0), false, true, "which is not comparable"}, + ltTestCase{"-151", false, true, "which is not comparable"}, + ltTestCase{ltTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) FloatCandidateBadTypes() { + matcher := LessThan(float32(-150)) + + cases := []ltTestCase{ + ltTestCase{true, false, true, "which is not comparable"}, + ltTestCase{complex64(-151), false, true, "which is not comparable"}, + ltTestCase{complex128(-151), false, true, "which is not comparable"}, + ltTestCase{[...]int{-151}, false, true, "which is not comparable"}, + ltTestCase{make(chan int), false, true, "which is not comparable"}, + ltTestCase{func() {}, false, true, "which is not comparable"}, + ltTestCase{map[int]int{}, false, true, "which is not comparable"}, + ltTestCase{<TestCase{}, false, true, "which is not comparable"}, + ltTestCase{make([]int, 0), false, true, "which is not comparable"}, + ltTestCase{"-151", false, true, "which is not comparable"}, + ltTestCase{ltTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) StringCandidateBadTypes() { + matcher := LessThan("17") + + cases := []ltTestCase{ + ltTestCase{true, false, true, "which is not comparable"}, + ltTestCase{int(0), false, true, "which is not comparable"}, + ltTestCase{int8(0), false, true, "which is not comparable"}, + ltTestCase{int16(0), false, true, "which is not comparable"}, + ltTestCase{int32(0), false, true, "which is not comparable"}, + ltTestCase{int64(0), false, true, "which is not comparable"}, + ltTestCase{uint(0), false, true, "which is not comparable"}, + ltTestCase{uint8(0), false, true, "which is not comparable"}, + 
ltTestCase{uint16(0), false, true, "which is not comparable"}, + ltTestCase{uint32(0), false, true, "which is not comparable"}, + ltTestCase{uint64(0), false, true, "which is not comparable"}, + ltTestCase{float32(0), false, true, "which is not comparable"}, + ltTestCase{float64(0), false, true, "which is not comparable"}, + ltTestCase{complex64(-151), false, true, "which is not comparable"}, + ltTestCase{complex128(-151), false, true, "which is not comparable"}, + ltTestCase{[...]int{-151}, false, true, "which is not comparable"}, + ltTestCase{make(chan int), false, true, "which is not comparable"}, + ltTestCase{func() {}, false, true, "which is not comparable"}, + ltTestCase{map[int]int{}, false, true, "which is not comparable"}, + ltTestCase{&ltTestCase{}, false, true, "which is not comparable"}, + ltTestCase{make([]int, 0), false, true, "which is not comparable"}, + ltTestCase{ltTestCase{}, false, true, "which is not comparable"}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) BadArgument() { + panicked := false + + defer func() { + ExpectThat(panicked, Equals(true)) + }() + + defer func() { + if r := recover(); r != nil { + panicked = true + } + }() + + LessThan(complex128(0)) +} + +//////////////////////////////////////////////////////////////////////// +// Integer literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessThanTest) NegativeIntegerLiteral() { + matcher := LessThan(-150) + desc := matcher.Description() + expectedDesc := "less than -150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-(1 << 30), true, false, ""}, + ltTestCase{-151, true, false, ""}, + ltTestCase{-150, false, false, ""}, + ltTestCase{0, false, false, ""}, + ltTestCase{17, false, false, ""}, + + ltTestCase{int(-(1 << 30)), true, false, ""}, + ltTestCase{int(-151), true, false, ""}, + ltTestCase{int(-150), false, false, ""}, + ltTestCase{int(0), false, false, ""}, + ltTestCase{int(17), false, false, ""}, + + ltTestCase{int8(-127), false, false, ""}, + ltTestCase{int8(0), false, false, ""}, + ltTestCase{int8(17), false, false, ""}, + + ltTestCase{int16(-(1 << 14)), true, false, ""}, + ltTestCase{int16(-151), true, false, ""}, + ltTestCase{int16(-150), false, false, ""}, + ltTestCase{int16(0), false, false, ""}, + ltTestCase{int16(17), false, false, ""}, + + ltTestCase{int32(-(1 << 30)), true, false, ""}, + ltTestCase{int32(-151), true, false, ""}, + ltTestCase{int32(-150), false, false, ""}, + ltTestCase{int32(0), false, false, ""}, + ltTestCase{int32(17), false, false, ""}, + + ltTestCase{int64(-(1 << 30)), true, false, ""}, + ltTestCase{int64(-151), true, false, ""}, + ltTestCase{int64(-150), false, false, ""}, + ltTestCase{int64(0), false, false, ""}, + ltTestCase{int64(17), false, false, ""}, + + // Unsigned integers.
+ ltTestCase{uint((1 << 32) - 151), false, false, ""}, + ltTestCase{uint(0), false, false, ""}, + ltTestCase{uint(17), false, false, ""}, + + ltTestCase{uint8(0), false, false, ""}, + ltTestCase{uint8(17), false, false, ""}, + ltTestCase{uint8(253), false, false, ""}, + + ltTestCase{uint16((1 << 16) - 151), false, false, ""}, + ltTestCase{uint16(0), false, false, ""}, + ltTestCase{uint16(17), false, false, ""}, + + ltTestCase{uint32((1 << 32) - 151), false, false, ""}, + ltTestCase{uint32(0), false, false, ""}, + ltTestCase{uint32(17), false, false, ""}, + + ltTestCase{uint64((1 << 64) - 151), false, false, ""}, + ltTestCase{uint64(0), false, false, ""}, + ltTestCase{uint64(17), false, false, ""}, + + // Floating point. + ltTestCase{float32(-(1 << 30)), true, false, ""}, + ltTestCase{float32(-151), true, false, ""}, + ltTestCase{float32(-150.1), true, false, ""}, + ltTestCase{float32(-150), false, false, ""}, + ltTestCase{float32(-149.9), false, false, ""}, + ltTestCase{float32(0), false, false, ""}, + ltTestCase{float32(17), false, false, ""}, + ltTestCase{float32(160), false, false, ""}, + + ltTestCase{float64(-(1 << 30)), true, false, ""}, + ltTestCase{float64(-151), true, false, ""}, + ltTestCase{float64(-150.1), true, false, ""}, + ltTestCase{float64(-150), false, false, ""}, + ltTestCase{float64(-149.9), false, false, ""}, + ltTestCase{float64(0), false, false, ""}, + ltTestCase{float64(17), false, false, ""}, + ltTestCase{float64(160), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) ZeroIntegerLiteral() { + matcher := LessThan(0) + desc := matcher.Description() + expectedDesc := "less than 0" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-(1 << 30), true, false, ""}, + ltTestCase{-1, true, false, ""}, + ltTestCase{0, false, false, ""}, + ltTestCase{1, false, false, ""}, + ltTestCase{17, false, false, ""}, + ltTestCase{(1 << 30), false, false, ""}, + + ltTestCase{int(-(1 << 30)), true, false, ""}, + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(0), false, false, ""}, + ltTestCase{int(1), false, false, ""}, + ltTestCase{int(17), false, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(0), false, false, ""}, + ltTestCase{int8(1), false, false, ""}, + + ltTestCase{int16(-(1 << 14)), true, false, ""}, + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(0), false, false, ""}, + ltTestCase{int16(1), false, false, ""}, + ltTestCase{int16(17), false, false, ""}, + + ltTestCase{int32(-(1 << 30)), true, false, ""}, + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(0), false, false, ""}, + ltTestCase{int32(1), false, false, ""}, + ltTestCase{int32(17), false, false, ""}, + + ltTestCase{int64(-(1 << 30)), true, false, ""}, + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(0), false, false, ""}, + ltTestCase{int64(1), false, false, ""}, + ltTestCase{int64(17), false, false, ""}, + + // Unsigned integers. 
+ ltTestCase{uint((1 << 32) - 1), false, false, ""}, + ltTestCase{uint(0), false, false, ""}, + ltTestCase{uint(17), false, false, ""}, + + ltTestCase{uint8(0), false, false, ""}, + ltTestCase{uint8(17), false, false, ""}, + ltTestCase{uint8(253), false, false, ""}, + + ltTestCase{uint16((1 << 16) - 1), false, false, ""}, + ltTestCase{uint16(0), false, false, ""}, + ltTestCase{uint16(17), false, false, ""}, + + ltTestCase{uint32((1 << 32) - 1), false, false, ""}, + ltTestCase{uint32(0), false, false, ""}, + ltTestCase{uint32(17), false, false, ""}, + + ltTestCase{uint64((1 << 64) - 1), false, false, ""}, + ltTestCase{uint64(0), false, false, ""}, + ltTestCase{uint64(17), false, false, ""}, + + // Floating point. + ltTestCase{float32(-(1 << 30)), true, false, ""}, + ltTestCase{float32(-1), true, false, ""}, + ltTestCase{float32(-0.1), true, false, ""}, + ltTestCase{float32(-0.0), false, false, ""}, + ltTestCase{float32(0), false, false, ""}, + ltTestCase{float32(0.1), false, false, ""}, + ltTestCase{float32(17), false, false, ""}, + ltTestCase{float32(160), false, false, ""}, + + ltTestCase{float64(-(1 << 30)), true, false, ""}, + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(-0.1), true, false, ""}, + ltTestCase{float64(-0), false, false, ""}, + ltTestCase{float64(0), false, false, ""}, + ltTestCase{float64(17), false, false, ""}, + ltTestCase{float64(160), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) PositiveIntegerLiteral() { + matcher := LessThan(150) + desc := matcher.Description() + expectedDesc := "less than 150" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-1, true, false, ""}, + ltTestCase{149, true, false, ""}, + ltTestCase{150, false, false, ""}, + ltTestCase{151, false, false, ""}, + + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(149), true, false, ""}, + ltTestCase{int(150), false, false, ""}, + ltTestCase{int(151), false, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(0), true, false, ""}, + ltTestCase{int8(17), true, false, ""}, + ltTestCase{int8(127), true, false, ""}, + + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(149), true, false, ""}, + ltTestCase{int16(150), false, false, ""}, + ltTestCase{int16(151), false, false, ""}, + + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(149), true, false, ""}, + ltTestCase{int32(150), false, false, ""}, + ltTestCase{int32(151), false, false, ""}, + + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(149), true, false, ""}, + ltTestCase{int64(150), false, false, ""}, + ltTestCase{int64(151), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint(0), true, false, ""}, + ltTestCase{uint(149), true, false, ""}, + ltTestCase{uint(150), false, false, ""}, + ltTestCase{uint(151), false, false, ""}, + + ltTestCase{uint8(0), true, false, ""}, + ltTestCase{uint8(127), true, false, ""}, + + ltTestCase{uint16(0), true, false, ""}, + ltTestCase{uint16(149), true, false, ""}, + ltTestCase{uint16(150), false, false, ""}, + ltTestCase{uint16(151), false, false, ""}, + + ltTestCase{uint32(0), true, false, ""}, + ltTestCase{uint32(149), true, false, ""}, + ltTestCase{uint32(150), false, false, ""}, + ltTestCase{uint32(151), false, false, ""}, + + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(149), true, false, ""}, + ltTestCase{uint64(150), false, false, ""}, + ltTestCase{uint64(151), false, false, ""}, + + // Floating point. 
+ ltTestCase{float32(-1), true, false, ""}, + ltTestCase{float32(149), true, false, ""}, + ltTestCase{float32(149.9), true, false, ""}, + ltTestCase{float32(150), false, false, ""}, + ltTestCase{float32(150.1), false, false, ""}, + ltTestCase{float32(151), false, false, ""}, + + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(149), true, false, ""}, + ltTestCase{float64(149.9), true, false, ""}, + ltTestCase{float64(150), false, false, ""}, + ltTestCase{float64(150.1), false, false, ""}, + ltTestCase{float64(151), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Float literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessThanTest) NegativeFloatLiteral() { + matcher := LessThan(-150.1) + desc := matcher.Description() + expectedDesc := "less than -150.1" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-(1 << 30), true, false, ""}, + ltTestCase{-151, true, false, ""}, + ltTestCase{-150, false, false, ""}, + ltTestCase{0, false, false, ""}, + ltTestCase{17, false, false, ""}, + + ltTestCase{int(-(1 << 30)), true, false, ""}, + ltTestCase{int(-151), true, false, ""}, + ltTestCase{int(-150), false, false, ""}, + ltTestCase{int(0), false, false, ""}, + ltTestCase{int(17), false, false, ""}, + + ltTestCase{int8(-127), false, false, ""}, + ltTestCase{int8(0), false, false, ""}, + ltTestCase{int8(17), false, false, ""}, + + ltTestCase{int16(-(1 << 14)), true, false, ""}, + ltTestCase{int16(-151), true, false, ""}, + ltTestCase{int16(-150), false, false, ""}, + ltTestCase{int16(0), false, false, ""}, + ltTestCase{int16(17), false, false, ""}, + + ltTestCase{int32(-(1 << 30)), true, false, ""}, + ltTestCase{int32(-151), true, false, ""}, + ltTestCase{int32(-150), false, false, ""}, + ltTestCase{int32(0), false, false, ""}, + ltTestCase{int32(17), false, false, ""}, + + ltTestCase{int64(-(1 << 30)), true, false, ""}, + ltTestCase{int64(-151), true, false, ""}, + ltTestCase{int64(-150), false, false, ""}, + ltTestCase{int64(0), false, false, ""}, + ltTestCase{int64(17), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint((1 << 32) - 151), false, false, ""}, + ltTestCase{uint(0), false, false, ""}, + ltTestCase{uint(17), false, false, ""}, + + ltTestCase{uint8(0), false, false, ""}, + ltTestCase{uint8(17), false, false, ""}, + ltTestCase{uint8(253), false, false, ""}, + + ltTestCase{uint16((1 << 16) - 151), false, false, ""}, + ltTestCase{uint16(0), false, false, ""}, + ltTestCase{uint16(17), false, false, ""}, + + ltTestCase{uint32((1 << 32) - 151), false, false, ""}, + ltTestCase{uint32(0), false, false, ""}, + ltTestCase{uint32(17), false, false, ""}, + + ltTestCase{uint64((1 << 64) - 151), false, false, ""}, + ltTestCase{uint64(0), false, false, ""}, + ltTestCase{uint64(17), false, false, ""}, + + // Floating point. 
+ ltTestCase{float32(-(1 << 30)), true, false, ""}, + ltTestCase{float32(-151), true, false, ""}, + ltTestCase{float32(-150.2), true, false, ""}, + ltTestCase{float32(-150.1), false, false, ""}, + ltTestCase{float32(-150), false, false, ""}, + ltTestCase{float32(0), false, false, ""}, + ltTestCase{float32(17), false, false, ""}, + ltTestCase{float32(160), false, false, ""}, + + ltTestCase{float64(-(1 << 30)), true, false, ""}, + ltTestCase{float64(-151), true, false, ""}, + ltTestCase{float64(-150.2), true, false, ""}, + ltTestCase{float64(-150.1), false, false, ""}, + ltTestCase{float64(-150), false, false, ""}, + ltTestCase{float64(0), false, false, ""}, + ltTestCase{float64(17), false, false, ""}, + ltTestCase{float64(160), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) PositiveFloatLiteral() { + matcher := LessThan(149.9) + desc := matcher.Description() + expectedDesc := "less than 149.9" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-1, true, false, ""}, + ltTestCase{149, true, false, ""}, + ltTestCase{150, false, false, ""}, + ltTestCase{151, false, false, ""}, + + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(149), true, false, ""}, + ltTestCase{int(150), false, false, ""}, + ltTestCase{int(151), false, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(0), true, false, ""}, + ltTestCase{int8(17), true, false, ""}, + ltTestCase{int8(127), true, false, ""}, + + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(149), true, false, ""}, + ltTestCase{int16(150), false, false, ""}, + ltTestCase{int16(151), false, false, ""}, + + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(149), true, false, ""}, + ltTestCase{int32(150), false, false, ""}, + ltTestCase{int32(151), false, false, ""}, + + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(149), true, false, ""}, + ltTestCase{int64(150), false, false, ""}, + ltTestCase{int64(151), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint(0), true, false, ""}, + ltTestCase{uint(149), true, false, ""}, + ltTestCase{uint(150), false, false, ""}, + ltTestCase{uint(151), false, false, ""}, + + ltTestCase{uint8(0), true, false, ""}, + ltTestCase{uint8(127), true, false, ""}, + + ltTestCase{uint16(0), true, false, ""}, + ltTestCase{uint16(149), true, false, ""}, + ltTestCase{uint16(150), false, false, ""}, + ltTestCase{uint16(151), false, false, ""}, + + ltTestCase{uint32(0), true, false, ""}, + ltTestCase{uint32(149), true, false, ""}, + ltTestCase{uint32(150), false, false, ""}, + ltTestCase{uint32(151), false, false, ""}, + + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(149), true, false, ""}, + ltTestCase{uint64(150), false, false, ""}, + ltTestCase{uint64(151), false, false, ""}, + + // Floating point. 
+ ltTestCase{float32(-1), true, false, ""}, + ltTestCase{float32(149), true, false, ""}, + ltTestCase{float32(149.8), true, false, ""}, + ltTestCase{float32(149.9), false, false, ""}, + ltTestCase{float32(150), false, false, ""}, + ltTestCase{float32(151), false, false, ""}, + + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(149), true, false, ""}, + ltTestCase{float64(149.8), true, false, ""}, + ltTestCase{float64(149.9), false, false, ""}, + ltTestCase{float64(150), false, false, ""}, + ltTestCase{float64(151), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// Subtle cases +//////////////////////////////////////////////////////////////////////// + +func (t *LessThanTest) Int64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := LessThan(int64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "less than 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-1, true, false, ""}, + ltTestCase{kTwoTo25 + 0, true, false, ""}, + ltTestCase{kTwoTo25 + 1, false, false, ""}, + ltTestCase{kTwoTo25 + 2, false, false, ""}, + + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(kTwoTo25 + 0), true, false, ""}, + ltTestCase{int(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(127), true, false, ""}, + + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(0), true, false, ""}, + ltTestCase{int16(32767), true, false, ""}, + + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(kTwoTo25 + 0), true, false, ""}, + ltTestCase{int32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int32(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(kTwoTo25 + 0), true, false, ""}, + ltTestCase{int64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint(0), true, false, ""}, + ltTestCase{uint(kTwoTo25 + 0), true, false, ""}, + ltTestCase{uint(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{uint8(0), true, false, ""}, + ltTestCase{uint8(255), true, false, ""}, + + ltTestCase{uint16(0), true, false, ""}, + ltTestCase{uint16(65535), true, false, ""}, + + ltTestCase{uint32(0), true, false, ""}, + ltTestCase{uint32(kTwoTo25 + 0), true, false, ""}, + ltTestCase{uint32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint32(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + ltTestCase{uint64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Floating point. 
+ ltTestCase{float32(-1), true, false, ""}, + ltTestCase{float32(kTwoTo25 - 2), true, false, ""}, + ltTestCase{float32(kTwoTo25 - 1), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 0), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 2), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(kTwoTo25 - 2), true, false, ""}, + ltTestCase{float64(kTwoTo25 - 1), true, false, ""}, + ltTestCase{float64(kTwoTo25 + 0), true, false, ""}, + ltTestCase{float64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 2), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) Int64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := LessThan(int64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "less than 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-1, true, false, ""}, + ltTestCase{1 << 30, true, false, ""}, + + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(math.MaxInt32), true, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(127), true, false, ""}, + + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(0), true, false, ""}, + ltTestCase{int16(32767), true, false, ""}, + + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(math.MaxInt32), true, false, ""}, + + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(kTwoTo54 - 1), true, false, ""}, + ltTestCase{int64(kTwoTo54 + 0), true, false, ""}, + ltTestCase{int64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{int64(kTwoTo54 + 2), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint(0), true, false, ""}, + ltTestCase{uint(math.MaxUint32), true, false, ""}, + + ltTestCase{uint8(0), true, false, ""}, + ltTestCase{uint8(255), true, false, ""}, + + ltTestCase{uint16(0), true, false, ""}, + ltTestCase{uint16(65535), true, false, ""}, + + ltTestCase{uint32(0), true, false, ""}, + ltTestCase{uint32(math.MaxUint32), true, false, ""}, + + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + ltTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + ltTestCase{uint64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + + // Floating point. + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(kTwoTo54 - 2), true, false, ""}, + ltTestCase{float64(kTwoTo54 - 1), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 0), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 2), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) Uint64NotExactlyRepresentableBySinglePrecision() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. 
+ const kTwoTo25 = 1 << 25 + matcher := LessThan(uint64(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "less than 33554433" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{-1, true, false, ""}, + ltTestCase{kTwoTo25 + 0, true, false, ""}, + ltTestCase{kTwoTo25 + 1, false, false, ""}, + ltTestCase{kTwoTo25 + 2, false, false, ""}, + + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(kTwoTo25 + 0), true, false, ""}, + ltTestCase{int(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(127), true, false, ""}, + + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(0), true, false, ""}, + ltTestCase{int16(32767), true, false, ""}, + + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(kTwoTo25 + 0), true, false, ""}, + ltTestCase{int32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int32(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(kTwoTo25 + 0), true, false, ""}, + ltTestCase{int64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int64(kTwoTo25 + 2), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint(0), true, false, ""}, + ltTestCase{uint(kTwoTo25 + 0), true, false, ""}, + ltTestCase{uint(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{uint8(0), true, false, ""}, + ltTestCase{uint8(255), true, false, ""}, + + ltTestCase{uint16(0), true, false, ""}, + ltTestCase{uint16(65535), true, false, ""}, + + ltTestCase{uint32(0), true, false, ""}, + ltTestCase{uint32(kTwoTo25 + 0), true, false, ""}, + ltTestCase{uint32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint32(kTwoTo25 + 2), false, false, ""}, + + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(kTwoTo25 + 0), true, false, ""}, + ltTestCase{uint64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + + // Floating point. + ltTestCase{float32(-1), true, false, ""}, + ltTestCase{float32(kTwoTo25 - 2), true, false, ""}, + ltTestCase{float32(kTwoTo25 - 1), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 0), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 2), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(kTwoTo25 - 2), true, false, ""}, + ltTestCase{float64(kTwoTo25 - 1), true, false, ""}, + ltTestCase{float64(kTwoTo25 + 0), true, false, ""}, + ltTestCase{float64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 2), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) Uint64NotExactlyRepresentableByDoublePrecision() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := LessThan(uint64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "less than 18014398509481985" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. 
+ ltTestCase{-1, true, false, ""}, + ltTestCase{1 << 30, true, false, ""}, + + ltTestCase{int(-1), true, false, ""}, + ltTestCase{int(math.MaxInt32), true, false, ""}, + + ltTestCase{int8(-1), true, false, ""}, + ltTestCase{int8(127), true, false, ""}, + + ltTestCase{int16(-1), true, false, ""}, + ltTestCase{int16(0), true, false, ""}, + ltTestCase{int16(32767), true, false, ""}, + + ltTestCase{int32(-1), true, false, ""}, + ltTestCase{int32(math.MaxInt32), true, false, ""}, + + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(kTwoTo54 - 1), true, false, ""}, + ltTestCase{int64(kTwoTo54 + 0), true, false, ""}, + ltTestCase{int64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{int64(kTwoTo54 + 2), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint(0), true, false, ""}, + ltTestCase{uint(math.MaxUint32), true, false, ""}, + + ltTestCase{uint8(0), true, false, ""}, + ltTestCase{uint8(255), true, false, ""}, + + ltTestCase{uint16(0), true, false, ""}, + ltTestCase{uint16(65535), true, false, ""}, + + ltTestCase{uint32(0), true, false, ""}, + ltTestCase{uint32(math.MaxUint32), true, false, ""}, + + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(kTwoTo54 - 1), true, false, ""}, + ltTestCase{uint64(kTwoTo54 + 0), true, false, ""}, + ltTestCase{uint64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + + // Floating point. + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(kTwoTo54 - 2), true, false, ""}, + ltTestCase{float64(kTwoTo54 - 1), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 0), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 2), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) Float32AboveExactIntegerRange() { + // Single-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^25-1, 2^25+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo25 = 1 << 25 + matcher := LessThan(float32(kTwoTo25 + 1)) + + desc := matcher.Description() + expectedDesc := "less than 3.3554432e+07" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(kTwoTo25 - 2), true, false, ""}, + ltTestCase{int64(kTwoTo25 - 1), false, false, ""}, + ltTestCase{int64(kTwoTo25 + 0), false, false, ""}, + ltTestCase{int64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{int64(kTwoTo25 + 2), false, false, ""}, + ltTestCase{int64(kTwoTo25 + 3), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(kTwoTo25 - 2), true, false, ""}, + ltTestCase{uint64(kTwoTo25 - 1), false, false, ""}, + ltTestCase{uint64(kTwoTo25 + 0), false, false, ""}, + ltTestCase{uint64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{uint64(kTwoTo25 + 2), false, false, ""}, + ltTestCase{uint64(kTwoTo25 + 3), false, false, ""}, + + // Floating point. 
+ ltTestCase{float32(-1), true, false, ""}, + ltTestCase{float32(kTwoTo25 - 2), true, false, ""}, + ltTestCase{float32(kTwoTo25 - 1), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 0), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 1), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 2), false, false, ""}, + ltTestCase{float32(kTwoTo25 + 3), false, false, ""}, + + ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(kTwoTo25 - 2), true, false, ""}, + ltTestCase{float64(kTwoTo25 - 1), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 0), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 1), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 2), false, false, ""}, + ltTestCase{float64(kTwoTo25 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) Float64AboveExactIntegerRange() { + // Double-precision floats don't have enough bits to represent the integers + // near this one distinctly, so [2^54-1, 2^54+2] all receive the same value + // and should be treated as equivalent when floats are in the mix. + const kTwoTo54 = 1 << 54 + matcher := LessThan(float64(kTwoTo54 + 1)) + + desc := matcher.Description() + expectedDesc := "less than 1.8014398509481984e+16" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + // Signed integers. + ltTestCase{int64(-1), true, false, ""}, + ltTestCase{int64(kTwoTo54 - 2), true, false, ""}, + ltTestCase{int64(kTwoTo54 - 1), false, false, ""}, + ltTestCase{int64(kTwoTo54 + 0), false, false, ""}, + ltTestCase{int64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{int64(kTwoTo54 + 2), false, false, ""}, + ltTestCase{int64(kTwoTo54 + 3), false, false, ""}, + + // Unsigned integers. + ltTestCase{uint64(0), true, false, ""}, + ltTestCase{uint64(kTwoTo54 - 2), true, false, ""}, + ltTestCase{uint64(kTwoTo54 - 1), false, false, ""}, + ltTestCase{uint64(kTwoTo54 + 0), false, false, ""}, + ltTestCase{uint64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{uint64(kTwoTo54 + 2), false, false, ""}, + ltTestCase{uint64(kTwoTo54 + 3), false, false, ""}, + + // Floating point. 
+ ltTestCase{float64(-1), true, false, ""}, + ltTestCase{float64(kTwoTo54 - 2), true, false, ""}, + ltTestCase{float64(kTwoTo54 - 1), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 0), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 1), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 2), false, false, ""}, + ltTestCase{float64(kTwoTo54 + 3), false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +//////////////////////////////////////////////////////////////////////// +// String literals +//////////////////////////////////////////////////////////////////////// + +func (t *LessThanTest) EmptyString() { + matcher := LessThan("") + desc := matcher.Description() + expectedDesc := "less than \"\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + ltTestCase{"", false, false, ""}, + ltTestCase{"\x00", false, false, ""}, + ltTestCase{"a", false, false, ""}, + ltTestCase{"foo", false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) SingleNullByte() { + matcher := LessThan("\x00") + desc := matcher.Description() + expectedDesc := "less than \"\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + ltTestCase{"", true, false, ""}, + ltTestCase{"\x00", false, false, ""}, + ltTestCase{"a", false, false, ""}, + ltTestCase{"foo", false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} + +func (t *LessThanTest) LongerString() { + matcher := LessThan("foo\x00") + desc := matcher.Description() + expectedDesc := "less than \"foo\x00\"" + + ExpectThat(desc, Equals(expectedDesc)) + + cases := []ltTestCase{ + ltTestCase{"", true, false, ""}, + ltTestCase{"\x00", true, false, ""}, + ltTestCase{"bar", true, false, ""}, + ltTestCase{"foo", true, false, ""}, + ltTestCase{"foo\x00", false, false, ""}, + ltTestCase{"fooa", false, false, ""}, + ltTestCase{"qux", false, false, ""}, + } + + t.checkTestCases(matcher, cases) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go new file mode 100644 index 0000000000..031c6cb3ef --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/matches_regexp_test.go @@ -0,0 +1,92 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type MatchesRegexpTest struct { +} + +func init() { RegisterTestSuite(&MatchesRegexpTest{}) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *MatchesRegexpTest) Description() { + m := MatchesRegexp("foo.*bar") + ExpectEq("matches regexp \"foo.*bar\"", m.Description()) +} + +func (t *MatchesRegexpTest) InvalidRegexp() { + ExpectThat( + func() { MatchesRegexp("(foo") }, + Panics(HasSubstr("missing closing )"))) +} + +func (t *MatchesRegexpTest) CandidateIsNil() { + m := MatchesRegexp("") + err := m.Matches(nil) + + ExpectThat(err, Error(Equals("which is not a string or []byte"))) + ExpectTrue(isFatal(err)) +} + +func (t *MatchesRegexpTest) CandidateIsInteger() { + m := MatchesRegexp("") + err := m.Matches(17) + + ExpectThat(err, Error(Equals("which is not a string or []byte"))) + ExpectTrue(isFatal(err)) +} + +func (t *MatchesRegexpTest) NonMatchingCandidates() { + m := MatchesRegexp("fo[op]\\s+x") + var err error + + err = m.Matches("fon x") + ExpectThat(err, Error(Equals(""))) + ExpectFalse(isFatal(err)) + + err = m.Matches("fopx") + ExpectThat(err, Error(Equals(""))) + ExpectFalse(isFatal(err)) + + err = m.Matches("fop ") + ExpectThat(err, Error(Equals(""))) + ExpectFalse(isFatal(err)) +} + +func (t *MatchesRegexpTest) MatchingCandidates() { + m := MatchesRegexp("fo[op]\\s+x") + var err error + + err = m.Matches("foo x") + ExpectEq(nil, err) + + err = m.Matches("fop x") + ExpectEq(nil, err) + + err = m.Matches("blah blah foo x blah blah") + ExpectEq(nil, err) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/not_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/not_test.go new file mode 100644 index 0000000000..9c65b85ef8 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/not_test.go @@ -0,0 +1,108 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "errors" + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" + "testing" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type fakeMatcher struct { + matchFunc func(interface{}) error + description string +} + +func (m *fakeMatcher) Matches(c interface{}) error { + return m.matchFunc(c) +} + +func (m *fakeMatcher) Description() string { + return m.description +} + +type NotTest struct { + +} + +func init() { RegisterTestSuite(&NotTest{}) } +func TestOgletest(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *NotTest) CallsWrapped() { + var suppliedCandidate interface{} + matchFunc := func(c interface{}) error { + suppliedCandidate = c + return nil + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Not(wrapped) + + matcher.Matches(17) + ExpectThat(suppliedCandidate, Equals(17)) +} + +func (t *NotTest) WrappedReturnsTrue() { + matchFunc := func(c interface{}) error { + return nil + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Not(wrapped) + + err := matcher.Matches(0) + ExpectThat(err, Error(Equals(""))) +} + +func (t *NotTest) WrappedReturnsNonFatalError() { + matchFunc := func(c interface{}) error { + return errors.New("taco") + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Not(wrapped) + + err := matcher.Matches(0) + ExpectEq(nil, err) +} + +func (t *NotTest) WrappedReturnsFatalError() { + matchFunc := func(c interface{}) error { + return NewFatalError("taco") + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Not(wrapped) + + err := matcher.Matches(0) + ExpectThat(err, Error(Equals("taco"))) +} + +func (t *NotTest) Description() { + wrapped := &fakeMatcher{nil, "taco"} + matcher := Not(wrapped) + + ExpectEq("not(taco)", matcher.Description()) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go new file mode 100644 index 0000000000..fbb66bf31e --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/panics_test.go @@ -0,0 +1,141 @@ +// Copyright 2011 Aaron Jacobs. All Rights Reserved. +// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "errors" + . "github.com/smartystreets/assertions/internal/oglematchers" + . 
"github.com/smartystreets/assertions/internal/ogletest" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type PanicsTest struct { + matcherCalled bool + suppliedCandidate interface{} + wrappedError error + + matcher Matcher +} + +func init() { RegisterTestSuite(&PanicsTest{}) } + +func (t *PanicsTest) SetUp(i *TestInfo) { + wrapped := &fakeMatcher{ + func(c interface{}) error { + t.matcherCalled = true + t.suppliedCandidate = c + return t.wrappedError + }, + "foo", + } + + t.matcher = Panics(wrapped) +} + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *PanicsTest) Description() { + ExpectThat(t.matcher.Description(), Equals("panics with: foo")) +} + +func (t *PanicsTest) CandidateIsNil() { + err := t.matcher.Matches(nil) + + ExpectThat(err, Error(Equals("which is not a zero-arg function"))) + ExpectTrue(isFatal(err)) +} + +func (t *PanicsTest) CandidateIsString() { + err := t.matcher.Matches("taco") + + ExpectThat(err, Error(Equals("which is not a zero-arg function"))) + ExpectTrue(isFatal(err)) +} + +func (t *PanicsTest) CandidateTakesArgs() { + err := t.matcher.Matches(func(i int) string { return "" }) + + ExpectThat(err, Error(Equals("which is not a zero-arg function"))) + ExpectTrue(isFatal(err)) +} + +func (t *PanicsTest) CallsFunction() { + callCount := 0 + t.matcher.Matches(func() string { + callCount++ + return "" + }) + + ExpectThat(callCount, Equals(1)) +} + +func (t *PanicsTest) FunctionDoesntPanic() { + err := t.matcher.Matches(func() {}) + + ExpectThat(err, Error(Equals("which didn't panic"))) + ExpectFalse(isFatal(err)) +} + +func (t *PanicsTest) CallsWrappedMatcher() { + expectedErr := 17 + t.wrappedError = errors.New("") + t.matcher.Matches(func() { panic(expectedErr) }) + + ExpectThat(t.suppliedCandidate, Equals(expectedErr)) +} + +func (t *PanicsTest) WrappedReturnsTrue() { + err := t.matcher.Matches(func() { panic("") }) + + ExpectEq(nil, err) +} + +func (t *PanicsTest) WrappedReturnsFatalErrorWithoutText() { + t.wrappedError = NewFatalError("") + err := t.matcher.Matches(func() { panic(17) }) + + ExpectThat(err, Error(Equals("which panicked with: 17"))) + ExpectFalse(isFatal(err)) +} + +func (t *PanicsTest) WrappedReturnsFatalErrorWithText() { + t.wrappedError = NewFatalError("which blah") + err := t.matcher.Matches(func() { panic(17) }) + + ExpectThat(err, Error(Equals("which panicked with: 17, which blah"))) + ExpectFalse(isFatal(err)) +} + +func (t *PanicsTest) WrappedReturnsNonFatalErrorWithoutText() { + t.wrappedError = errors.New("") + err := t.matcher.Matches(func() { panic(17) }) + + ExpectThat(err, Error(Equals("which panicked with: 17"))) + ExpectFalse(isFatal(err)) +} + +func (t *PanicsTest) WrappedReturnsNonFatalErrorWithText() { + t.wrappedError = errors.New("which blah") + err := t.matcher.Matches(func() { panic(17) }) + + ExpectThat(err, Error(Equals("which panicked with: 17, which blah"))) + ExpectFalse(isFatal(err)) +} diff --git a/vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go b/vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go new file mode 100644 index 0000000000..3bb72a702b --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/internal/oglematchers/pointee_test.go @@ -0,0 +1,152 @@ +// Copyright 2012 Aaron Jacobs. All Rights Reserved. 
+// Author: aaronjjacobs@gmail.com (Aaron Jacobs) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package oglematchers_test + +import ( + "errors" + . "github.com/smartystreets/assertions/internal/oglematchers" + . "github.com/smartystreets/assertions/internal/ogletest" + "testing" +) + +//////////////////////////////////////////////////////////////////////// +// Helpers +//////////////////////////////////////////////////////////////////////// + +type PointeeTest struct {} +func init() { RegisterTestSuite(&PointeeTest{}) } + +func TestPointee(t *testing.T) { RunTests(t) } + +//////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////// + +func (t *PointeeTest) Description() { + wrapped := &fakeMatcher{nil, "taco"} + matcher := Pointee(wrapped) + + ExpectEq("pointee(taco)", matcher.Description()) +} + +func (t *PointeeTest) CandidateIsNotAPointer() { + matcher := Pointee(HasSubstr("")) + err := matcher.Matches([]byte{}) + + ExpectThat(err, Error(Equals("which is not a pointer"))) + ExpectTrue(isFatal(err)) +} + +func (t *PointeeTest) CandidateIsANilLiteral() { + matcher := Pointee(HasSubstr("")) + err := matcher.Matches(nil) + + ExpectThat(err, Error(Equals("which is not a pointer"))) + ExpectTrue(isFatal(err)) +} + +func (t *PointeeTest) CandidateIsANilPointer() { + matcher := Pointee(HasSubstr("")) + err := matcher.Matches((*int)(nil)) + + ExpectThat(err, Error(Equals(""))) + ExpectTrue(isFatal(err)) +} + +func (t *PointeeTest) CallsWrapped() { + var suppliedCandidate interface{} + matchFunc := func(c interface{}) error { + suppliedCandidate = c + return nil + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Pointee(wrapped) + + someSlice := []byte{} + matcher.Matches(&someSlice) + ExpectThat(suppliedCandidate, IdenticalTo(someSlice)) +} + +func (t *PointeeTest) WrappedReturnsOkay() { + matchFunc := func(c interface{}) error { + return nil + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Pointee(wrapped) + + err := matcher.Matches(new(int)) + ExpectEq(nil, err) +} + +func (t *PointeeTest) WrappedReturnsNonFatalNonEmptyError() { + matchFunc := func(c interface{}) error { + return errors.New("taco") + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Pointee(wrapped) + + i := 17 + err := matcher.Matches(&i) + ExpectFalse(isFatal(err)) + ExpectThat(err, Error(Equals("taco"))) +} + +func (t *PointeeTest) WrappedReturnsNonFatalEmptyError() { + matchFunc := func(c interface{}) error { + return errors.New("") + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Pointee(wrapped) + + i := 17 + err := matcher.Matches(&i) + ExpectFalse(isFatal(err)) + ExpectThat(err, Error(HasSubstr("whose pointee"))) + ExpectThat(err, Error(HasSubstr("17"))) +} + +func (t *PointeeTest) WrappedReturnsFatalNonEmptyError() { + matchFunc := func(c interface{}) error { + return NewFatalError("taco") + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Pointee(wrapped) + + i 
:= 17 + err := matcher.Matches(&i) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(Equals("taco"))) +} + +func (t *PointeeTest) WrappedReturnsFatalEmptyError() { + matchFunc := func(c interface{}) error { + return NewFatalError("") + } + + wrapped := &fakeMatcher{matchFunc, ""} + matcher := Pointee(wrapped) + + i := 17 + err := matcher.Matches(&i) + ExpectTrue(isFatal(err)) + ExpectThat(err, Error(HasSubstr("whose pointee"))) + ExpectThat(err, Error(HasSubstr("17"))) +} diff --git a/vendor/github.com/smartystreets/assertions/panic_test.go b/vendor/github.com/smartystreets/assertions/panic_test.go new file mode 100644 index 0000000000..15eafac4fb --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/panic_test.go @@ -0,0 +1,53 @@ +package assertions + +import ( + "fmt" + "testing" +) + +func TestShouldPanic(t *testing.T) { + fail(t, so(func() {}, ShouldPanic, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + fail(t, so(func() {}, ShouldPanic, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + + fail(t, so(1, ShouldPanic), shouldUseVoidNiladicFunction) + fail(t, so(func(i int) {}, ShouldPanic), shouldUseVoidNiladicFunction) + fail(t, so(func() int { panic("hi") }, ShouldPanic), shouldUseVoidNiladicFunction) + + fail(t, so(func() {}, ShouldPanic), shouldHavePanicked) + pass(t, so(func() { panic("hi") }, ShouldPanic)) +} + +func TestShouldNotPanic(t *testing.T) { + fail(t, so(func() {}, ShouldNotPanic, 1), "This assertion requires exactly 0 comparison values (you provided 1).") + fail(t, so(func() {}, ShouldNotPanic, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + + fail(t, so(1, ShouldNotPanic), shouldUseVoidNiladicFunction) + fail(t, so(func(i int) {}, ShouldNotPanic), shouldUseVoidNiladicFunction) + + fail(t, so(func() { panic("hi") }, ShouldNotPanic), fmt.Sprintf(shouldNotHavePanicked, "hi")) + pass(t, so(func() {}, ShouldNotPanic)) +} + +func TestShouldPanicWith(t *testing.T) { + fail(t, so(func() {}, ShouldPanicWith), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(func() {}, ShouldPanicWith, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(1, ShouldPanicWith, 1), shouldUseVoidNiladicFunction) + fail(t, so(func(i int) {}, ShouldPanicWith, "hi"), shouldUseVoidNiladicFunction) + fail(t, so(func() {}, ShouldPanicWith, "bye"), shouldHavePanicked) + fail(t, so(func() { panic("hi") }, ShouldPanicWith, "bye"), "bye|hi|Expected func() to panic with 'bye' (but it panicked with 'hi')!") + + pass(t, so(func() { panic("hi") }, ShouldPanicWith, "hi")) +} + +func TestShouldNotPanicWith(t *testing.T) { + fail(t, so(func() {}, ShouldNotPanicWith), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(func() {}, ShouldNotPanicWith, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(1, ShouldNotPanicWith, 1), shouldUseVoidNiladicFunction) + fail(t, so(func(i int) {}, ShouldNotPanicWith, "hi"), shouldUseVoidNiladicFunction) + fail(t, so(func() { panic("hi") }, ShouldNotPanicWith, "hi"), "Expected func() NOT to panic with 'hi' (but it did)!") + + pass(t, so(func() {}, ShouldNotPanicWith, "bye")) + pass(t, so(func() { panic("hi") }, ShouldNotPanicWith, "bye")) +} diff --git a/vendor/github.com/smartystreets/assertions/quantity_test.go b/vendor/github.com/smartystreets/assertions/quantity_test.go new file mode 100644 index 
0000000000..7546e7250a --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/quantity_test.go @@ -0,0 +1,145 @@ +package assertions + +import "testing" + +func TestShouldBeGreaterThan(t *testing.T) { + fail(t, so(1, ShouldBeGreaterThan), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldBeGreaterThan, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so(1, ShouldBeGreaterThan, 0)) + pass(t, so(1.1, ShouldBeGreaterThan, 1)) + pass(t, so(1, ShouldBeGreaterThan, uint(0))) + pass(t, so("b", ShouldBeGreaterThan, "a")) + + fail(t, so(0, ShouldBeGreaterThan, 1), "Expected '0' to be greater than '1' (but it wasn't)!") + fail(t, so(1, ShouldBeGreaterThan, 1.1), "Expected '1' to be greater than '1.1' (but it wasn't)!") + fail(t, so(uint(0), ShouldBeGreaterThan, 1.1), "Expected '0' to be greater than '1.1' (but it wasn't)!") + fail(t, so("a", ShouldBeGreaterThan, "b"), "Expected 'a' to be greater than 'b' (but it wasn't)!") +} + +func TestShouldBeGreaterThanOrEqual(t *testing.T) { + fail(t, so(1, ShouldBeGreaterThanOrEqualTo), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldBeGreaterThanOrEqualTo, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so(1, ShouldBeGreaterThanOrEqualTo, 1)) + pass(t, so(1.1, ShouldBeGreaterThanOrEqualTo, 1.1)) + pass(t, so(1, ShouldBeGreaterThanOrEqualTo, uint(1))) + pass(t, so("b", ShouldBeGreaterThanOrEqualTo, "b")) + + pass(t, so(1, ShouldBeGreaterThanOrEqualTo, 0)) + pass(t, so(1.1, ShouldBeGreaterThanOrEqualTo, 1)) + pass(t, so(1, ShouldBeGreaterThanOrEqualTo, uint(0))) + pass(t, so("b", ShouldBeGreaterThanOrEqualTo, "a")) + + fail(t, so(0, ShouldBeGreaterThanOrEqualTo, 1), "Expected '0' to be greater than or equal to '1' (but it wasn't)!") + fail(t, so(1, ShouldBeGreaterThanOrEqualTo, 1.1), "Expected '1' to be greater than or equal to '1.1' (but it wasn't)!") + fail(t, so(uint(0), ShouldBeGreaterThanOrEqualTo, 1.1), "Expected '0' to be greater than or equal to '1.1' (but it wasn't)!") + fail(t, so("a", ShouldBeGreaterThanOrEqualTo, "b"), "Expected 'a' to be greater than or equal to 'b' (but it wasn't)!") +} + +func TestShouldBeLessThan(t *testing.T) { + fail(t, so(1, ShouldBeLessThan), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldBeLessThan, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so(0, ShouldBeLessThan, 1)) + pass(t, so(1, ShouldBeLessThan, 1.1)) + pass(t, so(uint(0), ShouldBeLessThan, 1)) + pass(t, so("a", ShouldBeLessThan, "b")) + + fail(t, so(1, ShouldBeLessThan, 0), "Expected '1' to be less than '0' (but it wasn't)!") + fail(t, so(1.1, ShouldBeLessThan, 1), "Expected '1.1' to be less than '1' (but it wasn't)!") + fail(t, so(1.1, ShouldBeLessThan, uint(0)), "Expected '1.1' to be less than '0' (but it wasn't)!") + fail(t, so("b", ShouldBeLessThan, "a"), "Expected 'b' to be less than 'a' (but it wasn't)!") +} + +func TestShouldBeLessThanOrEqualTo(t *testing.T) { + fail(t, so(1, ShouldBeLessThanOrEqualTo), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldBeLessThanOrEqualTo, 0, 0), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so(1, ShouldBeLessThanOrEqualTo, 1)) + pass(t, so(1.1, ShouldBeLessThanOrEqualTo, 1.1)) + pass(t, so(uint(1), ShouldBeLessThanOrEqualTo, 1)) + pass(t, so("b", 
ShouldBeLessThanOrEqualTo, "b")) + + pass(t, so(0, ShouldBeLessThanOrEqualTo, 1)) + pass(t, so(1, ShouldBeLessThanOrEqualTo, 1.1)) + pass(t, so(uint(0), ShouldBeLessThanOrEqualTo, 1)) + pass(t, so("a", ShouldBeLessThanOrEqualTo, "b")) + + fail(t, so(1, ShouldBeLessThanOrEqualTo, 0), "Expected '1' to be less than '0' (but it wasn't)!") + fail(t, so(1.1, ShouldBeLessThanOrEqualTo, 1), "Expected '1.1' to be less than '1' (but it wasn't)!") + fail(t, so(1.1, ShouldBeLessThanOrEqualTo, uint(0)), "Expected '1.1' to be less than '0' (but it wasn't)!") + fail(t, so("b", ShouldBeLessThanOrEqualTo, "a"), "Expected 'b' to be less than 'a' (but it wasn't)!") +} + +func TestShouldBeBetween(t *testing.T) { + fail(t, so(1, ShouldBeBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(1, ShouldBeBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(4, ShouldBeBetween, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + fail(t, so(7, ShouldBeBetween, 8, 12), "Expected '7' to be between '8' and '12' (but it wasn't)!") + fail(t, so(8, ShouldBeBetween, 8, 12), "Expected '8' to be between '8' and '12' (but it wasn't)!") + pass(t, so(9, ShouldBeBetween, 8, 12)) + pass(t, so(10, ShouldBeBetween, 8, 12)) + pass(t, so(11, ShouldBeBetween, 8, 12)) + fail(t, so(12, ShouldBeBetween, 8, 12), "Expected '12' to be between '8' and '12' (but it wasn't)!") + fail(t, so(13, ShouldBeBetween, 8, 12), "Expected '13' to be between '8' and '12' (but it wasn't)!") + + pass(t, so(1, ShouldBeBetween, 2, 0)) + fail(t, so(-1, ShouldBeBetween, 2, 0), "Expected '-1' to be between '0' and '2' (but it wasn't)!") +} + +func TestShouldNotBeBetween(t *testing.T) { + fail(t, so(1, ShouldNotBeBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(1, ShouldNotBeBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(4, ShouldNotBeBetween, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + pass(t, so(7, ShouldNotBeBetween, 8, 12)) + pass(t, so(8, ShouldNotBeBetween, 8, 12)) + fail(t, so(9, ShouldNotBeBetween, 8, 12), "Expected '9' NOT to be between '8' and '12' (but it was)!") + fail(t, so(10, ShouldNotBeBetween, 8, 12), "Expected '10' NOT to be between '8' and '12' (but it was)!") + fail(t, so(11, ShouldNotBeBetween, 8, 12), "Expected '11' NOT to be between '8' and '12' (but it was)!") + pass(t, so(12, ShouldNotBeBetween, 8, 12)) + pass(t, so(13, ShouldNotBeBetween, 8, 12)) + + pass(t, so(-1, ShouldNotBeBetween, 2, 0)) + fail(t, so(1, ShouldNotBeBetween, 2, 0), "Expected '1' NOT to be between '0' and '2' (but it was)!") +} + +func TestShouldBeBetweenOrEqual(t *testing.T) { + fail(t, so(1, ShouldBeBetweenOrEqual), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(1, ShouldBeBetweenOrEqual, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(4, ShouldBeBetweenOrEqual, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + fail(t, so(7, ShouldBeBetweenOrEqual, 8, 12), "Expected '7' to be between '8' and '12' or equal to one of them (but it wasn't)!") + pass(t, so(8, ShouldBeBetweenOrEqual, 8, 12)) + pass(t, so(9, ShouldBeBetweenOrEqual, 8, 12)) + pass(t, so(10, ShouldBeBetweenOrEqual, 8, 12)) + pass(t, so(11, ShouldBeBetweenOrEqual, 8, 12)) + pass(t, so(12, 
ShouldBeBetweenOrEqual, 8, 12)) + fail(t, so(13, ShouldBeBetweenOrEqual, 8, 12), "Expected '13' to be between '8' and '12' or equal to one of them (but it wasn't)!") + + pass(t, so(1, ShouldBeBetweenOrEqual, 2, 0)) + fail(t, so(-1, ShouldBeBetweenOrEqual, 2, 0), "Expected '-1' to be between '0' and '2' or equal to one of them (but it wasn't)!") +} + +func TestShouldNotBeBetweenOrEqual(t *testing.T) { + fail(t, so(1, ShouldNotBeBetweenOrEqual), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(1, ShouldNotBeBetweenOrEqual, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(4, ShouldNotBeBetweenOrEqual, 1, 1), "The lower and upper bounds must be different values (they were both '1').") + + pass(t, so(7, ShouldNotBeBetweenOrEqual, 8, 12)) + fail(t, so(8, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '8' NOT to be between '8' and '12' or equal to one of them (but it was)!") + fail(t, so(9, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '9' NOT to be between '8' and '12' or equal to one of them (but it was)!") + fail(t, so(10, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '10' NOT to be between '8' and '12' or equal to one of them (but it was)!") + fail(t, so(11, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '11' NOT to be between '8' and '12' or equal to one of them (but it was)!") + fail(t, so(12, ShouldNotBeBetweenOrEqual, 8, 12), "Expected '12' NOT to be between '8' and '12' or equal to one of them (but it was)!") + pass(t, so(13, ShouldNotBeBetweenOrEqual, 8, 12)) + + pass(t, so(-1, ShouldNotBeBetweenOrEqual, 2, 0)) + fail(t, so(1, ShouldNotBeBetweenOrEqual, 2, 0), "Expected '1' NOT to be between '0' and '2' or equal to one of them (but it was)!") +} diff --git a/vendor/github.com/smartystreets/assertions/serializer_test.go b/vendor/github.com/smartystreets/assertions/serializer_test.go new file mode 100644 index 0000000000..597b40ac18 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/serializer_test.go @@ -0,0 +1,36 @@ +package assertions + +import ( + "encoding/json" + "fmt" + "testing" +) + +func TestSerializerCreatesSerializedVersionOfAssertionResult(t *testing.T) { + thing1 := Thing1{"Hi"} + thing2 := Thing2{"Bye"} + message := "Super-hip failure message." 
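+	// serialize should render the expected/actual values with %+v, and
+	// serializeDetailed with %#v; both renderings are rebuilt below via
+	// json.Marshal(FailureView{...}) and compared against the output.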
+ serializer := newSerializer() + + actualResult := serializer.serialize(thing1, thing2, message) + + expectedResult, _ := json.Marshal(FailureView{ + Message: message, + Expected: fmt.Sprintf("%+v", thing1), + Actual: fmt.Sprintf("%+v", thing2), + }) + + if actualResult != string(expectedResult) { + t.Errorf("\nExpected: %s\nActual: %s", string(expectedResult), actualResult) + } + + actualResult = serializer.serializeDetailed(thing1, thing2, message) + expectedResult, _ = json.Marshal(FailureView{ + Message: message, + Expected: fmt.Sprintf("%#v", thing1), + Actual: fmt.Sprintf("%#v", thing2), + }) + if actualResult != string(expectedResult) { + t.Errorf("\nExpected: %s\nActual: %s", string(expectedResult), actualResult) + } +} diff --git a/vendor/github.com/smartystreets/assertions/strings_test.go b/vendor/github.com/smartystreets/assertions/strings_test.go new file mode 100644 index 0000000000..ad8d0c8858 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/strings_test.go @@ -0,0 +1,118 @@ +package assertions + +import "testing" + +func TestShouldStartWith(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so("", ShouldStartWith), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so("", ShouldStartWith, "asdf", "asdf"), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so("", ShouldStartWith, "")) + fail(t, so("", ShouldStartWith, "x"), "x||Expected '' to start with 'x' (but it didn't)!") + pass(t, so("abc", ShouldStartWith, "abc")) + fail(t, so("abc", ShouldStartWith, "abcd"), "abcd|abc|Expected 'abc' to start with 'abcd' (but it didn't)!") + + pass(t, so("superman", ShouldStartWith, "super")) + fail(t, so("superman", ShouldStartWith, "bat"), "bat|sup...|Expected 'superman' to start with 'bat' (but it didn't)!") + fail(t, so("superman", ShouldStartWith, "man"), "man|sup...|Expected 'superman' to start with 'man' (but it didn't)!") + + fail(t, so(1, ShouldStartWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func TestShouldNotStartWith(t *testing.T) { + fail(t, so("", ShouldNotStartWith), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so("", ShouldNotStartWith, "asdf", "asdf"), "This assertion requires exactly 1 comparison values (you provided 2).") + + fail(t, so("", ShouldNotStartWith, ""), "Expected '' NOT to start with '' (but it did)!") + fail(t, so("superman", ShouldNotStartWith, "super"), "Expected 'superman' NOT to start with 'super' (but it did)!") + pass(t, so("superman", ShouldNotStartWith, "bat")) + pass(t, so("superman", ShouldNotStartWith, "man")) + + fail(t, so(1, ShouldNotStartWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func TestShouldEndWith(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so("", ShouldEndWith), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so("", ShouldEndWith, "", ""), "This assertion requires exactly 1 comparison values (you provided 2).") + + pass(t, so("", ShouldEndWith, "")) + fail(t, so("", ShouldEndWith, "z"), "z||Expected '' to end with 'z' (but it didn't)!") + pass(t, so("xyz", ShouldEndWith, "xyz")) + fail(t, so("xyz", ShouldEndWith, "wxyz"), "wxyz|xyz|Expected 'xyz' to end with 'wxyz' (but it didn't)!") + + pass(t, so("superman", ShouldEndWith, "man")) + fail(t, so("superman", ShouldEndWith, "super"), "super|...erman|Expected 'superman' to end with 'super' (but it 
didn't)!") + fail(t, so("superman", ShouldEndWith, "blah"), "blah|...rman|Expected 'superman' to end with 'blah' (but it didn't)!") + + fail(t, so(1, ShouldEndWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func TestShouldNotEndWith(t *testing.T) { + fail(t, so("", ShouldNotEndWith), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so("", ShouldNotEndWith, "", ""), "This assertion requires exactly 1 comparison values (you provided 2).") + + fail(t, so("", ShouldNotEndWith, ""), "Expected '' NOT to end with '' (but it did)!") + fail(t, so("superman", ShouldNotEndWith, "man"), "Expected 'superman' NOT to end with 'man' (but it did)!") + pass(t, so("superman", ShouldNotEndWith, "super")) + + fail(t, so(1, ShouldNotEndWith, 2), "Both arguments to this assertion must be strings (you provided int and int).") +} + +func TestShouldContainSubstring(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so("asdf", ShouldContainSubstring), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so("asdf", ShouldContainSubstring, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(123, ShouldContainSubstring, 23), "Both arguments to this assertion must be strings (you provided int and int).") + + pass(t, so("asdf", ShouldContainSubstring, "sd")) + fail(t, so("qwer", ShouldContainSubstring, "sd"), "sd|qwer|Expected 'qwer' to contain substring 'sd' (but it didn't)!") +} + +func TestShouldNotContainSubstring(t *testing.T) { + fail(t, so("asdf", ShouldNotContainSubstring), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so("asdf", ShouldNotContainSubstring, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(123, ShouldNotContainSubstring, 23), "Both arguments to this assertion must be strings (you provided int and int).") + + pass(t, so("qwer", ShouldNotContainSubstring, "sd")) + fail(t, so("asdf", ShouldNotContainSubstring, "sd"), "Expected 'asdf' NOT to contain substring 'sd' (but it did)!") +} + +func TestShouldBeBlank(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so("", ShouldBeBlank, "adsf"), "This assertion requires exactly 0 comparison values (you provided 1).") + fail(t, so(1, ShouldBeBlank), "The argument to this assertion must be a string (you provided int).") + + fail(t, so("asdf", ShouldBeBlank), "|asdf|Expected 'asdf' to be blank (but it wasn't)!") + pass(t, so("", ShouldBeBlank)) +} + +func TestShouldNotBeBlank(t *testing.T) { + fail(t, so("", ShouldNotBeBlank, "adsf"), "This assertion requires exactly 0 comparison values (you provided 1).") + fail(t, so(1, ShouldNotBeBlank), "The argument to this assertion must be a string (you provided int).") + + fail(t, so("", ShouldNotBeBlank), "Expected value to NOT be blank (but it was)!") + pass(t, so("asdf", ShouldNotBeBlank)) +} + +func TestShouldEqualWithout(t *testing.T) { + fail(t, so("", ShouldEqualWithout, ""), "This assertion requires exactly 2 comparison values (you provided 1).") + fail(t, so(1, ShouldEqualWithout, 2, 3), "All arguments to this assertion must be strings (you provided: [int int int]).") + + fail(t, so("asdf", ShouldEqualWithout, "qwer", "q"), "Expected 'asdf' to equal 'qwer' but without any 'q' (but it didn't).") + pass(t, so("asdf", ShouldEqualWithout, "df", "as")) +} + +func TestShouldEqualTrimSpace(t *testing.T) { + fail(t, so(" asdf ", ShouldEqualTrimSpace), "This 
assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldEqualTrimSpace, 2), "Both arguments to this assertion must be strings (you provided int and int).") + + fail(t, so("asdf", ShouldEqualTrimSpace, "qwer"), "qwer|asdf|Expected: 'qwer' Actual: 'asdf' (Should be equal)") + pass(t, so(" asdf\t\n", ShouldEqualTrimSpace, "asdf")) +} diff --git a/vendor/github.com/smartystreets/assertions/time_test.go b/vendor/github.com/smartystreets/assertions/time_test.go new file mode 100644 index 0000000000..f9dda8f8f3 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/time_test.go @@ -0,0 +1,159 @@ +package assertions + +import ( + "fmt" + "testing" + "time" +) + +func TestShouldHappenBefore(t *testing.T) { + fail(t, so(0, ShouldHappenBefore), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenBefore, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenBefore, 1), shouldUseTimes) + fail(t, so(0, ShouldHappenBefore, time.Now()), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenBefore, 0), shouldUseTimes) + + fail(t, so(january3, ShouldHappenBefore, january1), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '48h0m0s' after)!", pretty(january3), pretty(january1))) + fail(t, so(january3, ShouldHappenBefore, january3), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '0' after)!", pretty(january3), pretty(january3))) + pass(t, so(january1, ShouldHappenBefore, january3)) +} + +func TestShouldHappenOnOrBefore(t *testing.T) { + fail(t, so(0, ShouldHappenOnOrBefore), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenOnOrBefore, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenOnOrBefore, 1), shouldUseTimes) + fail(t, so(0, ShouldHappenOnOrBefore, time.Now()), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenOnOrBefore, 0), shouldUseTimes) + + fail(t, so(january3, ShouldHappenOnOrBefore, january1), fmt.Sprintf("Expected '%s' to happen before '%s' (it happened '48h0m0s' after)!", pretty(january3), pretty(january1))) + pass(t, so(january3, ShouldHappenOnOrBefore, january3)) + pass(t, so(january1, ShouldHappenOnOrBefore, january3)) +} + +func TestShouldHappenAfter(t *testing.T) { + fail(t, so(0, ShouldHappenAfter), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenAfter, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenAfter, 1), shouldUseTimes) + fail(t, so(0, ShouldHappenAfter, time.Now()), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenAfter, 0), shouldUseTimes) + + fail(t, so(january1, ShouldHappenAfter, january2), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '24h0m0s' before)!", pretty(january1), pretty(january2))) + fail(t, so(january1, ShouldHappenAfter, january1), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '0' before)!", pretty(january1), pretty(january1))) + pass(t, so(january3, ShouldHappenAfter, january1)) +} + +func TestShouldHappenOnOrAfter(t *testing.T) { + fail(t, so(0, ShouldHappenOnOrAfter), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenOnOrAfter, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenOnOrAfter, 1), shouldUseTimes) + 
fail(t, so(0, ShouldHappenOnOrAfter, time.Now()), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenOnOrAfter, 0), shouldUseTimes) + + fail(t, so(january1, ShouldHappenOnOrAfter, january2), fmt.Sprintf("Expected '%s' to happen after '%s' (it happened '24h0m0s' before)!", pretty(january1), pretty(january2))) + pass(t, so(january1, ShouldHappenOnOrAfter, january1)) + pass(t, so(january3, ShouldHappenOnOrAfter, january1)) +} + +func TestShouldHappenBetween(t *testing.T) { + fail(t, so(0, ShouldHappenBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenBetween, 1, 2), shouldUseTimes) + fail(t, so(0, ShouldHappenBetween, time.Now(), time.Now()), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenBetween, 0, time.Now()), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenBetween, time.Now(), 9), shouldUseTimes) + + fail(t, so(january1, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4))) + fail(t, so(january2, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '0' outside threshold)!", pretty(january2), pretty(january2), pretty(january4))) + pass(t, so(january3, ShouldHappenBetween, january2, january4)) + fail(t, so(january4, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '0' outside threshold)!", pretty(january4), pretty(january2), pretty(january4))) + fail(t, so(january5, ShouldHappenBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4))) +} + +func TestShouldHappenOnOrBetween(t *testing.T) { + fail(t, so(0, ShouldHappenOnOrBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenOnOrBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenOnOrBetween, 1, time.Now()), shouldUseTimes) + fail(t, so(0, ShouldHappenOnOrBetween, time.Now(), 1), shouldUseTimes) + fail(t, so(time.Now(), ShouldHappenOnOrBetween, 0, 1), shouldUseTimes) + + fail(t, so(january1, ShouldHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4))) + pass(t, so(january2, ShouldHappenOnOrBetween, january2, january4)) + pass(t, so(january3, ShouldHappenOnOrBetween, january2, january4)) + pass(t, so(january4, ShouldHappenOnOrBetween, january2, january4)) + fail(t, so(january5, ShouldHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4))) +} + +func TestShouldNotHappenOnOrBetween(t *testing.T) { + fail(t, so(0, ShouldNotHappenOnOrBetween), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(0, ShouldNotHappenOnOrBetween, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(0, ShouldNotHappenOnOrBetween, 1, time.Now()), shouldUseTimes) + fail(t, so(0, 
ShouldNotHappenOnOrBetween, time.Now(), 1), shouldUseTimes) + fail(t, so(time.Now(), ShouldNotHappenOnOrBetween, 0, 1), shouldUseTimes) + + pass(t, so(january1, ShouldNotHappenOnOrBetween, january2, january4)) + fail(t, so(january2, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january2), pretty(january2), pretty(january4))) + fail(t, so(january3, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january3), pretty(january2), pretty(january4))) + fail(t, so(january4, ShouldNotHappenOnOrBetween, january2, january4), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january4), pretty(january2), pretty(january4))) + pass(t, so(january5, ShouldNotHappenOnOrBetween, january2, january4)) +} + +func TestShouldHappenWithin(t *testing.T) { + fail(t, so(0, ShouldHappenWithin), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(0, ShouldHappenWithin, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(0, ShouldHappenWithin, 1, 2), shouldUseDurationAndTime) + fail(t, so(0, ShouldHappenWithin, oneDay, time.Now()), shouldUseDurationAndTime) + fail(t, so(time.Now(), ShouldHappenWithin, 0, time.Now()), shouldUseDurationAndTime) + + fail(t, so(january1, ShouldHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january1), pretty(january2), pretty(january4))) + pass(t, so(january2, ShouldHappenWithin, oneDay, january3)) + pass(t, so(january3, ShouldHappenWithin, oneDay, january3)) + pass(t, so(january4, ShouldHappenWithin, oneDay, january3)) + fail(t, so(january5, ShouldHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to happen between '%s' and '%s' (it happened '24h0m0s' outside threshold)!", pretty(january5), pretty(january2), pretty(january4))) +} + +func TestShouldNotHappenWithin(t *testing.T) { + fail(t, so(0, ShouldNotHappenWithin), "This assertion requires exactly 2 comparison values (you provided 0).") + fail(t, so(0, ShouldNotHappenWithin, 1, 2, 3), "This assertion requires exactly 2 comparison values (you provided 3).") + + fail(t, so(0, ShouldNotHappenWithin, 1, 2), shouldUseDurationAndTime) + fail(t, so(0, ShouldNotHappenWithin, oneDay, time.Now()), shouldUseDurationAndTime) + fail(t, so(time.Now(), ShouldNotHappenWithin, 0, time.Now()), shouldUseDurationAndTime) + + pass(t, so(january1, ShouldNotHappenWithin, oneDay, january3)) + fail(t, so(january2, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january2), pretty(january2), pretty(january4))) + fail(t, so(january3, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january3), pretty(january2), pretty(january4))) + fail(t, so(january4, ShouldNotHappenWithin, oneDay, january3), fmt.Sprintf("Expected '%s' to NOT happen on or between '%s' and '%s' (but it did)!", pretty(january4), pretty(january2), pretty(january4))) + pass(t, so(january5, ShouldNotHappenWithin, oneDay, january3)) +} + +func TestShouldBeChronological(t *testing.T) { + fail(t, so(0, ShouldBeChronological, 1, 2, 3), "This assertion requires exactly 0 comparison values (you provided 3).") + fail(t, so(0, 
ShouldBeChronological), shouldUseTimeSlice) + fail(t, so([]time.Time{january5, january1}, ShouldBeChronological), + "The 'Time' at index [1] should have happened after the previous one (but it didn't!):\n [0]: 2013-01-05 00:00:00 +0000 UTC\n [1]: 2013-01-01 00:00:00 +0000 UTC (see, it happened before!)") + + pass(t, so([]time.Time{january1, january2, january3, january4, january5}, ShouldBeChronological)) +} + +const layout = "2006-01-02 15:04" + +var january1, _ = time.Parse(layout, "2013-01-01 00:00") +var january2, _ = time.Parse(layout, "2013-01-02 00:00") +var january3, _ = time.Parse(layout, "2013-01-03 00:00") +var january4, _ = time.Parse(layout, "2013-01-04 00:00") +var january5, _ = time.Parse(layout, "2013-01-05 00:00") + +var oneDay, _ = time.ParseDuration("24h0m0s") +var twoDays, _ = time.ParseDuration("48h0m0s") + +func pretty(t time.Time) string { + return fmt.Sprintf("%v", t) +} diff --git a/vendor/github.com/smartystreets/assertions/type_test.go b/vendor/github.com/smartystreets/assertions/type_test.go new file mode 100644 index 0000000000..4b8d198467 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/type_test.go @@ -0,0 +1,76 @@ +package assertions + +import ( + "bytes" + "io" + "net/http" + "testing" +) + +func TestShouldHaveSameTypeAs(t *testing.T) { + serializer = newFakeSerializer() + + fail(t, so(1, ShouldHaveSameTypeAs), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldHaveSameTypeAs, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(nil, ShouldHaveSameTypeAs, 0), "int||Expected '' to be: 'int' (but was: '')!") + fail(t, so(1, ShouldHaveSameTypeAs, "asdf"), "string|int|Expected '1' to be: 'string' (but was: 'int')!") + + pass(t, so(1, ShouldHaveSameTypeAs, 0)) + pass(t, so(nil, ShouldHaveSameTypeAs, nil)) +} + +func TestShouldNotHaveSameTypeAs(t *testing.T) { + fail(t, so(1, ShouldNotHaveSameTypeAs), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(1, ShouldNotHaveSameTypeAs, 1, 2, 3), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(1, ShouldNotHaveSameTypeAs, 0), "Expected '1' to NOT be: 'int' (but it was)!") + fail(t, so(nil, ShouldNotHaveSameTypeAs, nil), "Expected '' to NOT be: '' (but it was)!") + + pass(t, so(nil, ShouldNotHaveSameTypeAs, 0)) + pass(t, so(1, ShouldNotHaveSameTypeAs, "asdf")) +} + +func TestShouldImplement(t *testing.T) { + var ioReader *io.Reader = nil + var response http.Response = http.Response{} + var responsePtr *http.Response = new(http.Response) + var reader = bytes.NewBufferString("") + + fail(t, so(reader, ShouldImplement), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(reader, ShouldImplement, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 2).") + fail(t, so(reader, ShouldImplement, ioReader, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(reader, ShouldImplement, "foo"), shouldCompareWithInterfacePointer) + fail(t, so(reader, ShouldImplement, 1), shouldCompareWithInterfacePointer) + fail(t, so(reader, ShouldImplement, nil), shouldCompareWithInterfacePointer) + + fail(t, so(nil, ShouldImplement, ioReader), shouldNotBeNilActual) + fail(t, so(1, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*int' does not implement the interface!") + + fail(t, so(response, ShouldImplement, ioReader), 
"Expected: 'io.Reader interface support'\nActual: '*http.Response' does not implement the interface!") + fail(t, so(responsePtr, ShouldImplement, ioReader), "Expected: 'io.Reader interface support'\nActual: '*http.Response' does not implement the interface!") + pass(t, so(reader, ShouldImplement, ioReader)) + pass(t, so(reader, ShouldImplement, (*io.Reader)(nil))) +} + +func TestShouldNotImplement(t *testing.T) { + var ioReader *io.Reader = nil + var response http.Response = http.Response{} + var responsePtr *http.Response = new(http.Response) + var reader io.Reader = bytes.NewBufferString("") + + fail(t, so(reader, ShouldNotImplement), "This assertion requires exactly 1 comparison values (you provided 0).") + fail(t, so(reader, ShouldNotImplement, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 2).") + fail(t, so(reader, ShouldNotImplement, ioReader, ioReader, ioReader), "This assertion requires exactly 1 comparison values (you provided 3).") + + fail(t, so(reader, ShouldNotImplement, "foo"), shouldCompareWithInterfacePointer) + fail(t, so(reader, ShouldNotImplement, 1), shouldCompareWithInterfacePointer) + fail(t, so(reader, ShouldNotImplement, nil), shouldCompareWithInterfacePointer) + + fail(t, so(reader, ShouldNotImplement, ioReader), "Expected '*bytes.Buffer'\nto NOT implement 'io.Reader' (but it did)!") + fail(t, so(nil, ShouldNotImplement, ioReader), shouldNotBeNilActual) + pass(t, so(1, ShouldNotImplement, ioReader)) + pass(t, so(response, ShouldNotImplement, ioReader)) + pass(t, so(responsePtr, ShouldNotImplement, ioReader)) +} diff --git a/vendor/github.com/smartystreets/assertions/utilities_for_test.go b/vendor/github.com/smartystreets/assertions/utilities_for_test.go new file mode 100644 index 0000000000..7243ebcb93 --- /dev/null +++ b/vendor/github.com/smartystreets/assertions/utilities_for_test.go @@ -0,0 +1,75 @@ +package assertions + +import ( + "fmt" + "path" + "runtime" + "strings" + "testing" +) + +func pass(t *testing.T, result string) { + if result != success { + _, file, line, _ := runtime.Caller(1) + base := path.Base(file) + t.Errorf("Expectation should have passed but failed (see %s: line %d): '%s'", base, line, result) + } +} + +func fail(t *testing.T, actual string, expected string) { + actual = format(actual) + expected = format(expected) + + if actual != expected { + if actual == "" { + actual = "(empty)" + } + _, file, line, _ := runtime.Caller(1) + base := path.Base(file) + t.Errorf("Expectation should have failed but passed (see %s: line %d). 
\nExpected: %s\nActual: %s\n", + base, line, expected, actual) + } +} +func format(message string) string { + message = strings.Replace(message, "\n", " ", -1) + for strings.Contains(message, " ") { + message = strings.Replace(message, " ", " ", -1) + } + return message +} + +type Thing1 struct { + a string +} +type Thing2 struct { + a string +} + +type Thinger interface { + Hi() +} + +type Thing struct{} + +func (self *Thing) Hi() {} + +type IntAlias int +type StringAlias string +type StringSliceAlias []string +type StringStringMapAlias map[string]string + +/******** FakeSerialzier ********/ + +type fakeSerializer struct{} + +func (self *fakeSerializer) serialize(expected, actual interface{}, message string) string { + return fmt.Sprintf("%v|%v|%s", expected, actual, message) +} + +func (self *fakeSerializer) serializeDetailed(expected, actual interface{}, message string) string { + return fmt.Sprintf("%v|%v|%s", expected, actual, message) +} + +func newFakeSerializer() *fakeSerializer { + return new(fakeSerializer) +} diff --git a/vendor/github.com/smartystreets/goconvey/.gitignore b/vendor/github.com/smartystreets/goconvey/.gitignore new file mode 100644 index 0000000000..c9205c5335 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/.gitignore @@ -0,0 +1,5 @@ +.DS_Store +Thumbs.db +examples/output.json +web/client/reports/ +/.idea \ No newline at end of file diff --git a/vendor/github.com/smartystreets/goconvey/.travis.yml b/vendor/github.com/smartystreets/goconvey/.travis.yml new file mode 100644 index 0000000000..a5124b0491 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + +install: + - go get -t ./... + +script: go test -short -v ./... + +sudo: false diff --git a/vendor/github.com/smartystreets/goconvey/CONTRIBUTING.md b/vendor/github.com/smartystreets/goconvey/CONTRIBUTING.md new file mode 100644 index 0000000000..9c9053b83a --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Subject: GoConvey maintainers wanted + +We'd like to open the project up to additional maintainers who want to move the project forward in a meaningful way. + +We've spent significant time at SmartyStreets building GoConvey and it has perfectly met (and exceeded) all of our initial design specifications. We've used it to great effect. Being so well-matched to our development workflows at SmartyStreets, we haven't had a need to hack on it lately. This had been frustrating to many in the community who have ideas for the project and would like to see new features released (and some old bugs fixed). The release of Go 1.5 and the new vendoring experiment has been a source of confusion and hassle for those who have already upgraded and find that GoConvey needs to be brought up to speed. + +Comment below if you're interested. Preference will be given to those that have already contributed to the project. Checkout the issues listing if you need some ideas for contributing. + +GoConvey is a popular 2-pronged, open-source github project (1,600+ stargazers, 100+ forks): + +- A package you import in your test code that allows you to write BDD-style tests. +- An executable that runs a local web server which displays auto-updating test results in a web browser. 
+ +---- + +- http://goconvey.co/ +- https://github.com/smartystreets/goconvey +- https://github.com/smartystreets/goconvey/wiki + +_I should mention that the [assertions package](https://github.com/smartystreets/assertions) imported by the convey package is used by other projects at SmartyStreets and so we will be continuing to maintain that project internally._ + +We hope to hear from you soon. Thanks! diff --git a/vendor/github.com/smartystreets/goconvey/README.md b/vendor/github.com/smartystreets/goconvey/README.md new file mode 100644 index 0000000000..a07ce5a6eb --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/README.md @@ -0,0 +1,126 @@ +GoConvey is awesome Go testing +============================== + +[![Build Status](https://travis-ci.org/smartystreets/goconvey.png)](https://travis-ci.org/smartystreets/goconvey) +[![GoDoc](https://godoc.org/github.com/smartystreets/goconvey?status.svg)](http://godoc.org/github.com/smartystreets/goconvey) + + +Welcome to GoConvey, a yummy Go testing tool for gophers. Works with `go test`. Use it in the terminal or browser according to your viewing pleasure. **[View full feature tour.](http://goconvey.co)** + +**Features:** + +- Directly integrates with `go test` +- Fully-automatic web UI (works with native Go tests, too) +- Huge suite of regression tests +- Shows test coverage (Go 1.2+) +- Readable, colorized console output (understandable by any manager, IT or not) +- Test code generator +- Desktop notifications (optional) +- Immediately open problem lines in [Sublime Text](http://www.sublimetext.com) ([some assembly required](https://github.com/asuth/subl-handler)) + + +You can ask questions about how to use GoConvey on [StackOverflow](http://stackoverflow.com/questions/ask?tags=goconvey,go&title=GoConvey%3A%20). Use the tags `go` and `goconvey`. + +**Menu:** + +- [Installation](#installation) +- [Quick start](#quick-start) +- [Documentation](#documentation) +- [Screenshots](#screenshots) +- [Contributors](#contributors-thanks) + + + + +Installation +------------ + + $ go get github.com/smartystreets/goconvey + +[Quick start](https://github.com/smartystreets/goconvey/wiki#get-going-in-25-seconds) +----------- + +Make a test, for example: + +```go +package package_name + +import ( + "testing" + . "github.com/smartystreets/goconvey/convey" +) + +func TestSpec(t *testing.T) { + + // Only pass t into top-level Convey calls + Convey("Given some integer with a starting value", t, func() { + x := 1 + + Convey("When the integer is incremented", func() { + x++ + + Convey("The value should be greater by one", func() { + So(x, ShouldEqual, 2) + }) + }) + }) +} +``` + + +#### [In the browser](https://github.com/smartystreets/goconvey/wiki/Web-UI) + +Start up the GoConvey web server at your project's path: + + $ $GOPATH/bin/goconvey + +Then watch the test results display in your browser at: + + http://localhost:8080 + + +If the browser doesn't open automatically, please click [http://localhost:8080](http://localhost:8080) to open manually. + +There you have it. +![](http://d79i1fxsrar4t.cloudfront.net/goconvey.co/gc-1-dark.png) +As long as GoConvey is running, test results will automatically update in your browser window. + +![](http://d79i1fxsrar4t.cloudfront.net/goconvey.co/gc-5-dark.png) +The design is responsive, so you can squish the browser real tight if you need to put it beside your code. + + +The [web UI](https://github.com/smartystreets/goconvey/wiki/Web-UI) supports traditional Go tests, so use it even if you're not using GoConvey tests. 
+ + + +#### [In the terminal](https://github.com/smartystreets/goconvey/wiki/Execution) + +Just do what you do best: + + $ go test + +Or if you want the output to include the story: + + $ go test -v + + +[Documentation](https://github.com/smartystreets/goconvey/wiki) + +----------- + +Check out the + +- [GoConvey wiki](https://github.com/smartystreets/goconvey/wiki), +- [![GoDoc](https://godoc.org/github.com/smartystreets/goconvey?status.png)](http://godoc.org/github.com/smartystreets/goconvey) +- and the *_test.go files scattered throughout this project. + +[Screenshots](http://goconvey.co) + +----------- + +For web UI and terminal screenshots, check out [the full feature tour](http://goconvey.co). + + +---------------------- + +GoConvey is brought to you by [SmartyStreets](https://github.com/smartystreets) and [several contributors](https://github.com/smartystreets/goconvey/graphs/contributors) (Thanks!). diff --git a/vendor/github.com/smartystreets/goconvey/convey/focused_execution_test.go b/vendor/github.com/smartystreets/goconvey/convey/focused_execution_test.go new file mode 100644 index 0000000000..294e32fa17 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/focused_execution_test.go @@ -0,0 +1,72 @@ +package convey + +import "testing" + +func TestFocusOnlyAtTopLevel(t *testing.T) { + output := prepare() + + FocusConvey("hi", t, func() { + output += "done" + }) + + expectEqual(t, "done", output) +} + +func TestFocus(t *testing.T) { + output := prepare() + + FocusConvey("hi", t, func() { + output += "1" + + Convey("bye", func() { + output += "2" + }) + }) + + expectEqual(t, "1", output) +} + +func TestNestedFocus(t *testing.T) { + output := prepare() + + FocusConvey("hi", t, func() { + output += "1" + + Convey("This shouldn't run", func() { + output += "boink!" + }) + + FocusConvey("This should run", func() { + output += "2" + + FocusConvey("The should run too", func() { + output += "3" + + }) + + Convey("The should NOT run", func() { + output += "blah blah blah!" 
+ }) + }) + }) + + expectEqual(t, "123", output) +} + +func TestForgotTopLevelFocus(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + + FocusConvey("This will be run because the top-level lacks Focus", func() { + output += "2" + }) + + Convey("3", func() { + output += "3" + }) + }) + + expectEqual(t, "1213", output) +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/gotest/doc_test.go b/vendor/github.com/smartystreets/goconvey/convey/gotest/doc_test.go new file mode 100644 index 0000000000..1b6406be99 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/gotest/doc_test.go @@ -0,0 +1 @@ +package gotest diff --git a/vendor/github.com/smartystreets/goconvey/convey/isolated_execution_test.go b/vendor/github.com/smartystreets/goconvey/convey/isolated_execution_test.go new file mode 100644 index 0000000000..7e22b3caa5 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/isolated_execution_test.go @@ -0,0 +1,774 @@ +package convey + +import ( + "strconv" + "testing" + "time" +) + +func TestSingleScope(t *testing.T) { + output := prepare() + + Convey("hi", t, func() { + output += "done" + }) + + expectEqual(t, "done", output) +} + +func TestSingleScopeWithMultipleConveys(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + }) + + Convey("2", t, func() { + output += "2" + }) + + expectEqual(t, "12", output) +} + +func TestNestedScopes(t *testing.T) { + output := prepare() + + Convey("a", t, func() { + output += "a " + + Convey("bb", func() { + output += "bb " + + Convey("ccc", func() { + output += "ccc | " + }) + }) + }) + + expectEqual(t, "a bb ccc | ", output) +} + +func TestNestedScopesWithIsolatedExecution(t *testing.T) { + output := prepare() + + Convey("a", t, func() { + output += "a " + + Convey("aa", func() { + output += "aa " + + Convey("aaa", func() { + output += "aaa | " + }) + + Convey("aaa1", func() { + output += "aaa1 | " + }) + }) + + Convey("ab", func() { + output += "ab " + + Convey("abb", func() { + output += "abb | " + }) + }) + }) + + expectEqual(t, "a aa aaa | a aa aaa1 | a ab abb | ", output) +} + +func TestSingleScopeWithConveyAndNestedReset(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + + Reset(func() { + output += "a" + }) + }) + + expectEqual(t, "1a", output) +} + +func TestPanicingReset(t *testing.T) { + output := prepare() + + Convey("1", t, func() { + output += "1" + + Reset(func() { + panic("nooo") + }) + + Convey("runs since the reset hasn't yet", func() { + output += "a" + }) + + Convey("but this doesnt", func() { + output += "nope" + }) + }) + + expectEqual(t, "1a", output) +} + +func TestSingleScopeWithMultipleRegistrationsAndReset(t *testing.T) { + output := prepare() + + Convey("reset after each nested convey", t, func() { + Convey("first output", func() { + output += "1" + }) + + Convey("second output", func() { + output += "2" + }) + + Reset(func() { + output += "a" + }) + }) + + expectEqual(t, "1a2a", output) +} + +func TestSingleScopeWithMultipleRegistrationsAndMultipleResets(t *testing.T) { + output := prepare() + + Convey("each reset is run at end of each nested convey", t, func() { + Convey("1", func() { + output += "1" + }) + + Convey("2", func() { + output += "2" + }) + + Reset(func() { + output += "a" + }) + + Reset(func() { + output += "b" + }) + }) + + expectEqual(t, "1ab2ab", output) +} + +func Test_Failure_AtHigherLevelScopePreventsChildScopesFromRunning(t *testing.T) { + output := prepare() + + 
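+	// The failing assertion below short-circuits the outer scope, so the
+	// nested Convey never runs and output stays empty.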
Convey("This step fails", t, func() { + So(1, ShouldEqual, 2) + + Convey("this should NOT be executed", func() { + output += "a" + }) + }) + + expectEqual(t, "", output) +} + +func Test_Panic_AtHigherLevelScopePreventsChildScopesFromRunning(t *testing.T) { + output := prepare() + + Convey("This step panics", t, func() { + Convey("this happens, because the panic didn't happen yet", func() { + output += "1" + }) + + output += "a" + + Convey("this should NOT be executed", func() { + output += "2" + }) + + output += "b" + + panic("Hi") + + output += "nope" + }) + + expectEqual(t, "1ab", output) +} + +func Test_Panic_InChildScopeDoes_NOT_PreventExecutionOfSiblingScopes(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step panics", func() { + panic("Hi") + output += "1" + }) + + Convey("This sibling should execute", func() { + output += "2" + }) + }) + + expectEqual(t, "2", output) +} + +func Test_Failure_InChildScopeDoes_NOT_PreventExecutionOfSiblingScopes(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step fails", func() { + So(1, ShouldEqual, 2) + output += "1" + }) + + Convey("This sibling should execute", func() { + output += "2" + }) + }) + + expectEqual(t, "2", output) +} + +func TestResetsAreAlwaysExecutedAfterScope_Panics(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step panics", func() { + panic("Hi") + output += "1" + }) + + Convey("This sibling step does not panic", func() { + output += "a" + + Reset(func() { + output += "b" + }) + }) + + Reset(func() { + output += "2" + }) + }) + + expectEqual(t, "2ab2", output) +} + +func TestResetsAreAlwaysExecutedAfterScope_Failures(t *testing.T) { + output := prepare() + + Convey("This is the parent", t, func() { + Convey("This step fails", func() { + So(1, ShouldEqual, 2) + output += "1" + }) + + Convey("This sibling step does not fail", func() { + output += "a" + + Reset(func() { + output += "b" + }) + }) + + Reset(func() { + output += "2" + }) + }) + + expectEqual(t, "2ab2", output) +} + +func TestSkipTopLevel(t *testing.T) { + output := prepare() + + SkipConvey("hi", t, func() { + output += "This shouldn't be executed!" 
+ }) + + expectEqual(t, "", output) +} + +func TestSkipNestedLevel(t *testing.T) { + output := prepare() + + Convey("hi", t, func() { + output += "yes" + + SkipConvey("bye", func() { + output += "no" + }) + }) + + expectEqual(t, "yes", output) +} + +func TestSkipNestedLevelSkipsAllChildLevels(t *testing.T) { + output := prepare() + + Convey("hi", t, func() { + output += "yes" + + SkipConvey("bye", func() { + output += "no" + + Convey("byebye", func() { + output += "no-no" + }) + }) + }) + + expectEqual(t, "yes", output) +} + +func TestIterativeConveys(t *testing.T) { + output := prepare() + + Convey("Test", t, func() { + for x := 0; x < 10; x++ { + y := strconv.Itoa(x) + + Convey(y, func() { + output += y + }) + } + }) + + expectEqual(t, "0123456789", output) +} + +func TestClosureVariables(t *testing.T) { + output := prepare() + + i := 0 + + Convey("A", t, func() { + i = i + 1 + j := i + + output += "A" + strconv.Itoa(i) + " " + + Convey("B", func() { + k := j + j = j + 1 + + output += "B" + strconv.Itoa(k) + " " + + Convey("C", func() { + output += "C" + strconv.Itoa(k) + strconv.Itoa(j) + " " + }) + + Convey("D", func() { + output += "D" + strconv.Itoa(k) + strconv.Itoa(j) + " " + }) + }) + + Convey("C", func() { + output += "C" + strconv.Itoa(j) + " " + }) + }) + + output += "D" + strconv.Itoa(i) + " " + + expectEqual(t, "A1 B1 C12 A2 B2 D23 A3 C3 D3 ", output) +} + +func TestClosureVariablesWithReset(t *testing.T) { + output := prepare() + + i := 0 + + Convey("A", t, func() { + i = i + 1 + j := i + + output += "A" + strconv.Itoa(i) + " " + + Reset(func() { + output += "R" + strconv.Itoa(i) + strconv.Itoa(j) + " " + }) + + Convey("B", func() { + output += "B" + strconv.Itoa(j) + " " + }) + + Convey("C", func() { + output += "C" + strconv.Itoa(j) + " " + }) + }) + + output += "D" + strconv.Itoa(i) + " " + + expectEqual(t, "A1 B1 R11 A2 C2 R22 D2 ", output) +} + +func TestWrappedSimple(t *testing.T) { + prepare() + output := resetTestString{""} + + Convey("A", t, func() { + func() { + output.output += "A " + + Convey("B", func() { + output.output += "B " + + Convey("C", func() { + output.output += "C " + }) + + }) + + Convey("D", func() { + output.output += "D " + }) + }() + }) + + expectEqual(t, "A B C A D ", output.output) +} + +type resetTestString struct { + output string +} + +func addReset(o *resetTestString, f func()) func() { + return func() { + Reset(func() { + o.output += "R " + }) + + f() + } +} + +func TestWrappedReset(t *testing.T) { + prepare() + output := resetTestString{""} + + Convey("A", t, addReset(&output, func() { + output.output += "A " + + Convey("B", func() { + output.output += "B " + }) + + Convey("C", func() { + output.output += "C " + }) + })) + + expectEqual(t, "A B R A C R ", output.output) +} + +func TestWrappedReset2(t *testing.T) { + prepare() + output := resetTestString{""} + + Convey("A", t, func() { + Reset(func() { + output.output += "R " + }) + + func() { + output.output += "A " + + Convey("B", func() { + output.output += "B " + + Convey("C", func() { + output.output += "C " + }) + }) + + Convey("D", func() { + output.output += "D " + }) + }() + }) + + expectEqual(t, "A B C R A D R ", output.output) +} + +func TestInfiniteLoopWithTrailingFail(t *testing.T) { + done := make(chan int) + + go func() { + Convey("This fails", t, func() { + Convey("and this is run", func() { + So(true, ShouldEqual, true) + }) + + /* And this prevents the whole block to be marked as run */ + So(false, ShouldEqual, true) + }) + + done <- 1 + }() + + select { + case <-done: + 
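+		// Reaching this case means the story finished; the 1ms timeout below
+		// fails the test if the trailing failed assertion ever caused the
+		// whole block to be re-run forever.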
return + case <-time.After(1 * time.Millisecond): + t.Fail() + } +} + +func TestOutermostResetInvokedForGrandchildren(t *testing.T) { + output := prepare() + + Convey("A", t, func() { + output += "A " + + Reset(func() { + output += "rA " + }) + + Convey("B", func() { + output += "B " + + Reset(func() { + output += "rB " + }) + + Convey("C", func() { + output += "C " + + Reset(func() { + output += "rC " + }) + }) + + Convey("D", func() { + output += "D " + + Reset(func() { + output += "rD " + }) + }) + }) + }) + + expectEqual(t, "A B C rC rB rA A B D rD rB rA ", output) +} + +func TestFailureOption(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + So(false, ShouldEqual, true) + output += "C " + }) + + expectEqual(t, "A B ", output) +} + +func TestFailureOption2(t *testing.T) { + output := prepare() + + Convey("A", t, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + So(false, ShouldEqual, true) + output += "C " + }) + + expectEqual(t, "A B ", output) +} + +func TestFailureOption3(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + So(false, ShouldEqual, true) + output += "C " + }) + + expectEqual(t, "A B C ", output) +} + +func TestFailureOptionInherit(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A1 " + So(false, ShouldEqual, true) + output += "A2 " + + Convey("B", func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + }) + + expectEqual(t, "A1 A2 B1 B2 B3 ", output) +} + +func TestFailureOptionInherit2(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A1 " + So(false, ShouldEqual, true) + output += "A2 " + + Convey("B", func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + So(false, ShouldEqual, true) + output += "A3 " + }) + }) + + expectEqual(t, "A1 ", output) +} + +func TestFailureOptionInherit3(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + + Convey("B", func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + }) + + expectEqual(t, "A1 A2 B1 B2 ", output) +} + +func TestFailureOptionNestedOverride(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A " + So(false, ShouldEqual, true) + output += "B " + + Convey("C", FailureHalts, func() { + output += "C " + So(true, ShouldEqual, true) + output += "D " + So(false, ShouldEqual, true) + output += "E " + }) + }) + + expectEqual(t, "A B C D ", output) +} + +func TestFailureOptionNestedOverride2(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A " + So(true, ShouldEqual, true) + output += "B " + + Convey("C", FailureContinues, func() { + output += "C " + So(true, ShouldEqual, true) + output += "D " + So(false, ShouldEqual, true) + output += "E " + }) + }) + + expectEqual(t, "A B C D E ", output) +} + +func TestMultipleInvocationInheritance(t *testing.T) { + output := prepare() + + Convey("A", t, FailureHalts, func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + + Convey("B", FailureContinues, func() { + output += "B1 " + So(true, ShouldEqual, true) + 
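+			// This scope's FailureContinues overrides the parent's FailureHalts,
+			// so execution proceeds past the failing assertion below and "B3 "
+			// is still appended.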
output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + + Convey("C", func() { + output += "C1 " + So(true, ShouldEqual, true) + output += "C2 " + So(false, ShouldEqual, true) + output += "C3 " + }) + }) + + expectEqual(t, "A1 A2 B1 B2 B3 A1 A2 C1 C2 ", output) +} + +func TestMultipleInvocationInheritance2(t *testing.T) { + output := prepare() + + Convey("A", t, FailureContinues, func() { + output += "A1 " + So(true, ShouldEqual, true) + output += "A2 " + So(false, ShouldEqual, true) + output += "A3 " + + Convey("B", FailureHalts, func() { + output += "B1 " + So(true, ShouldEqual, true) + output += "B2 " + So(false, ShouldEqual, true) + output += "B3 " + }) + + Convey("C", func() { + output += "C1 " + So(true, ShouldEqual, true) + output += "C2 " + So(false, ShouldEqual, true) + output += "C3 " + }) + }) + + expectEqual(t, "A1 A2 A3 B1 B2 A1 A2 A3 C1 C2 C3 ", output) +} + +func TestSetDefaultFailureMode(t *testing.T) { + output := prepare() + + SetDefaultFailureMode(FailureContinues) // the default is normally FailureHalts + defer SetDefaultFailureMode(FailureHalts) + + Convey("A", t, func() { + output += "A1 " + So(true, ShouldBeFalse) + output += "A2 " + }) + + expectEqual(t, "A1 A2 ", output) +} + +func prepare() string { + testReporter = newNilReporter() + return "" +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/dot_test.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/dot_test.go new file mode 100644 index 0000000000..a8d20d46f0 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/dot_test.go @@ -0,0 +1,40 @@ +package reporting + +import ( + "errors" + "testing" +) + +func TestDotReporterAssertionPrinting(t *testing.T) { + monochrome() + file := newMemoryFile() + printer := NewPrinter(file) + reporter := NewDotReporter(printer) + + reporter.Report(NewSuccessReport()) + reporter.Report(NewFailureReport("failed")) + reporter.Report(NewErrorReport(errors.New("error"))) + reporter.Report(NewSkipReport()) + + expected := dotSuccess + dotFailure + dotError + dotSkip + + if file.buffer != expected { + t.Errorf("\nExpected: '%s'\nActual: '%s'", expected, file.buffer) + } +} + +func TestDotReporterOnlyReportsAssertions(t *testing.T) { + monochrome() + file := newMemoryFile() + printer := NewPrinter(file) + reporter := NewDotReporter(printer) + + reporter.BeginStory(nil) + reporter.Enter(nil) + reporter.Exit() + reporter.EndStory() + + if file.buffer != "" { + t.Errorf("\nExpected: '(blank)'\nActual: '%s'", file.buffer) + } +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go new file mode 100644 index 0000000000..fda189458e --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/gotest_test.go @@ -0,0 +1,66 @@ +package reporting + +import "testing" + +func TestReporterReceivesSuccessfulReport(t *testing.T) { + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.Report(NewSuccessReport()) + + if test.failed { + t.Errorf("Should have have marked test as failed--the report reflected success.") + } +} + +func TestReporterReceivesFailureReport(t *testing.T) { + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.Report(NewFailureReport("This is a failure.")) + + if !test.failed { + t.Errorf("Test should have been marked as failed (but it wasn't).") + } +} + +func 
TestReporterReceivesErrorReport(t *testing.T) { + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.Report(NewErrorReport("This is an error.")) + + if !test.failed { + t.Errorf("Test should have been marked as failed (but it wasn't).") + } +} + +func TestReporterIsResetAtTheEndOfTheStory(t *testing.T) { + defer catch(t) + reporter := NewGoTestReporter() + test := new(fakeTest) + reporter.BeginStory(NewStoryReport(test)) + reporter.EndStory() + + reporter.Report(NewSuccessReport()) +} + +func TestReporterNoopMethods(t *testing.T) { + reporter := NewGoTestReporter() + reporter.Enter(NewScopeReport("title")) + reporter.Exit() +} + +func catch(t *testing.T) { + if r := recover(); r != nil { + t.Log("Getting to this point means we've passed (because we caught a panic appropriately).") + } +} + +type fakeTest struct { + failed bool +} + +func (self *fakeTest) Fail() { + self.failed = true +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/printer_test.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/printer_test.go new file mode 100644 index 0000000000..94202d5ac9 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/printer_test.go @@ -0,0 +1,181 @@ +package reporting + +import "testing" + +func TestPrint(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!" + + printer.Print(expected) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintFormat(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + template := "Hi, %s" + name := "Ralph" + expected := "Hi, Ralph" + + printer.Print(template, name) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintPreservesEncodedStrings(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "= -> %3D" + printer.Print(expected) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintln(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!" + + printer.Println(expected) + + if file.buffer != expected+"\n" { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnFormat(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + template := "Hi, %s" + name := "Ralph" + expected := "Hi, Ralph\n" + + printer.Println(template, name) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnPreservesEncodedStrings(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "= -> %3D" + printer.Println(expected) + + if file.buffer != expected+"\n" { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintIndented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const message = "Hello, World!\nGoodbye, World!" + const expected = " Hello, World!\n Goodbye, World!" + + printer.Indent() + printer.Print(message) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintDedented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!\nGoodbye, World!" 
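+	// Dedent cancels the preceding Indent, so both lines are expected to be
+	// written without leading spaces (contrast with TestPrintIndented above).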
+ + printer.Indent() + printer.Dedent() + printer.Print(expected) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnIndented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const message = "Hello, World!\nGoodbye, World!" + const expected = " Hello, World!\n Goodbye, World!\n" + + printer.Indent() + printer.Println(message) + + if file.buffer != expected { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestPrintlnDedented(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + const expected = "Hello, World!\nGoodbye, World!" + + printer.Indent() + printer.Dedent() + printer.Println(expected) + + if file.buffer != expected+"\n" { + t.Errorf("Expected '%s' to equal '%s'.", expected, file.buffer) + } +} + +func TestDedentTooFarShouldNotPanic(t *testing.T) { + defer func() { + if r := recover(); r != nil { + t.Error("Should not have panicked!") + } + }() + file := newMemoryFile() + printer := NewPrinter(file) + + printer.Dedent() + + t.Log("Getting to this point without panicking means we passed.") +} + +func TestInsert(t *testing.T) { + file := newMemoryFile() + printer := NewPrinter(file) + + printer.Indent() + printer.Print("Hi") + printer.Insert(" there") + printer.Dedent() + + expected := " Hi there" + if file.buffer != expected { + t.Errorf("Should have written '%s' but instead wrote '%s'.", expected, file.buffer) + } +} + +////////////////// memoryFile //////////////////// + +type memoryFile struct { + buffer string +} + +func (self *memoryFile) Write(p []byte) (n int, err error) { + self.buffer += string(p) + return len(p), nil +} + +func (self *memoryFile) String() string { + return self.buffer +} + +func newMemoryFile() *memoryFile { + return new(memoryFile) +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/problems_test.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/problems_test.go new file mode 100644 index 0000000000..92f0ca35cc --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/problems_test.go @@ -0,0 +1,51 @@ +package reporting + +import ( + "strings" + "testing" +) + +func TestNoopProblemReporterActions(t *testing.T) { + file, reporter := setup() + reporter.BeginStory(nil) + reporter.Enter(nil) + reporter.Exit() + expected := "" + actual := file.String() + if expected != actual { + t.Errorf("Expected: '(blank)'\nActual: '%s'", actual) + } +} + +func TestReporterPrintsFailuresAndErrorsAtTheEndOfTheStory(t *testing.T) { + file, reporter := setup() + reporter.Report(NewFailureReport("failed")) + reporter.Report(NewErrorReport("error")) + reporter.Report(NewSuccessReport()) + reporter.EndStory() + + result := file.String() + if !strings.Contains(result, "Errors:\n") { + t.Errorf("Expected errors, found none.") + } + if !strings.Contains(result, "Failures:\n") { + t.Errorf("Expected failures, found none.") + } + + // Each stack trace looks like: `* /path/to/file.go`, so look for `* `. + // With go 1.4+ there is a line in some stack traces that looks like this: + // `testing.(*M).Run(0x2082d60a0, 0x25b7c0)` + // So we can't just look for "*" anymore. + problemCount := strings.Count(result, "* ") + if problemCount != 2 { + t.Errorf("Expected one failure and one error (total of 2 '*' characters). 
Got %d", problemCount) + } +} + +func setup() (file *memoryFile, reporter *problem) { + monochrome() + file = newMemoryFile() + printer := NewPrinter(file) + reporter = NewProblemReporter(printer) + return +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go new file mode 100644 index 0000000000..4e5caf63b2 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/reporting/reporter_test.go @@ -0,0 +1,94 @@ +package reporting + +import ( + "runtime" + "testing" +) + +func TestEachNestedReporterReceivesTheCallFromTheContainingReporter(t *testing.T) { + fake1 := newFakeReporter() + fake2 := newFakeReporter() + reporter := NewReporters(fake1, fake2) + + reporter.BeginStory(nil) + assertTrue(t, fake1.begun) + assertTrue(t, fake2.begun) + + reporter.Enter(NewScopeReport("scope")) + assertTrue(t, fake1.entered) + assertTrue(t, fake2.entered) + + reporter.Report(NewSuccessReport()) + assertTrue(t, fake1.reported) + assertTrue(t, fake2.reported) + + reporter.Exit() + assertTrue(t, fake1.exited) + assertTrue(t, fake2.exited) + + reporter.EndStory() + assertTrue(t, fake1.ended) + assertTrue(t, fake2.ended) + + content := []byte("hi") + written, err := reporter.Write(content) + assertTrue(t, fake1.written) + assertTrue(t, fake2.written) + assertEqual(t, written, len(content)) + assertNil(t, err) + +} + +func assertTrue(t *testing.T, value bool) { + if !value { + _, _, line, _ := runtime.Caller(1) + t.Errorf("Value should have been true (but was false). See line %d", line) + } +} + +func assertEqual(t *testing.T, expected, actual int) { + if actual != expected { + _, _, line, _ := runtime.Caller(1) + t.Errorf("Value should have been %d (but was %d). See line %d", expected, actual, line) + } +} + +func assertNil(t *testing.T, err error) { + if err != nil { + _, _, line, _ := runtime.Caller(1) + t.Errorf("Error should have been (but wasn't). 
See line %d", err, line) + } +} + +type fakeReporter struct { + begun bool + entered bool + reported bool + exited bool + ended bool + written bool +} + +func newFakeReporter() *fakeReporter { + return &fakeReporter{} +} + +func (self *fakeReporter) BeginStory(story *StoryReport) { + self.begun = true +} +func (self *fakeReporter) Enter(scope *ScopeReport) { + self.entered = true +} +func (self *fakeReporter) Report(report *AssertionResult) { + self.reported = true +} +func (self *fakeReporter) Exit() { + self.exited = true +} +func (self *fakeReporter) EndStory() { + self.ended = true +} +func (self *fakeReporter) Write(content []byte) (int, error) { + self.written = true + return len(content), nil +} diff --git a/vendor/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go b/vendor/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go new file mode 100644 index 0000000000..69125c3cf4 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/reporting_hooks_test.go @@ -0,0 +1,317 @@ +package convey + +import ( + "fmt" + "net/http" + "net/http/httptest" + "path" + "runtime" + "strconv" + "strings" + "testing" + + "github.com/smartystreets/goconvey/convey/reporting" +) + +func TestSingleScopeReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(1, ShouldEqual, 1) + }) + + expectEqual(t, "Begin|A|Success|Exit|End", myReporter.wholeStory()) +} + +func TestNestedScopeReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("B", func() { + So(1, ShouldEqual, 1) + }) + }) + + expectEqual(t, "Begin|A|B|Success|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestFailureReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(1, ShouldBeNil) + }) + + expectEqual(t, "Begin|A|Failure|Exit|End", myReporter.wholeStory()) +} + +func TestFirstFailureEndsScopeExecution(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(1, ShouldBeNil) + So(nil, ShouldBeNil) + }) + + expectEqual(t, "Begin|A|Failure|Exit|End", myReporter.wholeStory()) +} + +func TestComparisonFailureDeserializedAndReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So("hi", ShouldEqual, "bye") + }) + + expectEqual(t, "Begin|A|Failure(bye/hi)|Exit|End", myReporter.wholeStory()) +} + +func TestNestedFailureReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("B", func() { + So(2, ShouldBeNil) + }) + }) + + expectEqual(t, "Begin|A|B|Failure|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestSuccessAndFailureReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + So(nil, ShouldBeNil) + So(1, ShouldBeNil) + }) + + expectEqual(t, "Begin|A|Success|Failure|Exit|End", myReporter.wholeStory()) +} + +func TestIncompleteActionReportedAsSkipped(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + Convey("B", nil) + }) + + expectEqual(t, "Begin|A|B|Skipped|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestSkippedConveyReportedAsSkipped(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + SkipConvey("B", func() { + So(1, ShouldEqual, 1) + }) + }) + + expectEqual(t, "Begin|A|B|Skipped|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestMultipleSkipsAreReported(t *testing.T) { + myReporter, test := 
setupFakeReporter() + + Convey("A", test, func() { + Convey("0", func() { + So(nil, ShouldBeNil) + }) + + SkipConvey("1", func() {}) + SkipConvey("2", func() {}) + + Convey("3", nil) + Convey("4", nil) + + Convey("5", func() { + So(nil, ShouldBeNil) + }) + }) + + expected := "Begin" + + "|A|0|Success|Exit|Exit" + + "|A|1|Skipped|Exit|Exit" + + "|A|2|Skipped|Exit|Exit" + + "|A|3|Skipped|Exit|Exit" + + "|A|4|Skipped|Exit|Exit" + + "|A|5|Success|Exit|Exit" + + "|End" + + expectEqual(t, expected, myReporter.wholeStory()) +} + +func TestSkippedAssertionIsNotReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + SkipSo(1, ShouldEqual, 1) + }) + + expectEqual(t, "Begin|A|Skipped|Exit|End", myReporter.wholeStory()) +} + +func TestMultipleSkippedAssertionsAreNotReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + SkipSo(1, ShouldEqual, 1) + So(1, ShouldEqual, 1) + SkipSo(1, ShouldEqual, 1) + }) + + expectEqual(t, "Begin|A|Skipped|Success|Skipped|Exit|End", myReporter.wholeStory()) +} + +func TestErrorByManualPanicReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + panic("Gopher alert!") + }) + + expectEqual(t, "Begin|A|Error|Exit|End", myReporter.wholeStory()) +} + +func TestIterativeConveysReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + for x := 0; x < 3; x++ { + Convey(strconv.Itoa(x), func() { + So(x, ShouldEqual, x) + }) + } + }) + + expectEqual(t, "Begin|A|0|Success|Exit|Exit|A|1|Success|Exit|Exit|A|2|Success|Exit|Exit|End", myReporter.wholeStory()) +} + +func TestNestedIterativeConveysReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func() { + for x := 0; x < 3; x++ { + Convey(strconv.Itoa(x), func() { + for y := 0; y < 3; y++ { + Convey("< "+strconv.Itoa(y), func() { + So(x, ShouldBeLessThan, y) + }) + } + }) + } + }) + + expectEqual(t, ("Begin|" + + "A|0|< 0|Failure|Exit|Exit|Exit|" + + "A|0|< 1|Success|Exit|Exit|Exit|" + + "A|0|< 2|Success|Exit|Exit|Exit|" + + "A|1|< 0|Failure|Exit|Exit|Exit|" + + "A|1|< 1|Failure|Exit|Exit|Exit|" + + "A|1|< 2|Success|Exit|Exit|Exit|" + + "A|2|< 0|Failure|Exit|Exit|Exit|" + + "A|2|< 1|Failure|Exit|Exit|Exit|" + + "A|2|< 2|Failure|Exit|Exit|Exit|" + + "End"), myReporter.wholeStory()) +} + +func TestEmbeddedAssertionReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + Convey("A", test, func(c C) { + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c.So(r.FormValue("msg"), ShouldEqual, "ping") + })) + http.DefaultClient.Get(ts.URL + "?msg=ping") + }) + + expectEqual(t, "Begin|A|Success|Exit|End", myReporter.wholeStory()) +} + +func TestEmbeddedContextHelperReported(t *testing.T) { + myReporter, test := setupFakeReporter() + + helper := func(c C) http.HandlerFunc { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c.Convey("Embedded", func() { + So(r.FormValue("msg"), ShouldEqual, "ping") + }) + }) + } + + Convey("A", test, func(c C) { + ts := httptest.NewServer(helper(c)) + http.DefaultClient.Get(ts.URL + "?msg=ping") + }) + + expectEqual(t, "Begin|A|Embedded|Success|Exit|Exit|End", myReporter.wholeStory()) +} + +func expectEqual(t *testing.T, expected interface{}, actual interface{}) { + if expected != actual { + _, file, line, _ := runtime.Caller(1) + t.Errorf("Expected '%v' to be '%v' but it wasn't. 
See '%s' at line %d.", + actual, expected, path.Base(file), line) + } +} + +func setupFakeReporter() (*fakeReporter, *fakeGoTest) { + myReporter := new(fakeReporter) + myReporter.calls = []string{} + testReporter = myReporter + return myReporter, new(fakeGoTest) +} + +type fakeReporter struct { + calls []string +} + +func (self *fakeReporter) BeginStory(story *reporting.StoryReport) { + self.calls = append(self.calls, "Begin") +} + +func (self *fakeReporter) Enter(scope *reporting.ScopeReport) { + self.calls = append(self.calls, scope.Title) +} + +func (self *fakeReporter) Report(report *reporting.AssertionResult) { + if report.Error != nil { + self.calls = append(self.calls, "Error") + } else if report.Failure != "" { + message := "Failure" + if report.Expected != "" || report.Actual != "" { + message += fmt.Sprintf("(%s/%s)", report.Expected, report.Actual) + } + self.calls = append(self.calls, message) + } else if report.Skipped { + self.calls = append(self.calls, "Skipped") + } else { + self.calls = append(self.calls, "Success") + } +} + +func (self *fakeReporter) Exit() { + self.calls = append(self.calls, "Exit") +} + +func (self *fakeReporter) EndStory() { + self.calls = append(self.calls, "End") +} + +func (self *fakeReporter) Write(content []byte) (int, error) { + return len(content), nil // no-op +} + +func (self *fakeReporter) wholeStory() string { + return strings.Join(self.calls, "|") +} + +//////////////////////////////// + +type fakeGoTest struct{} + +func (self *fakeGoTest) Fail() {} +func (self *fakeGoTest) Fatalf(format string, args ...interface{}) {} + +var test t = new(fakeGoTest) diff --git a/vendor/github.com/smartystreets/goconvey/convey/story_conventions_test.go b/vendor/github.com/smartystreets/goconvey/convey/story_conventions_test.go new file mode 100644 index 0000000000..84832c78d5 --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/convey/story_conventions_test.go @@ -0,0 +1,175 @@ +package convey + +import ( + "reflect" + "testing" +) + +func expectPanic(t *testing.T, f string) interface{} { + r := recover() + if r != nil { + if cp, ok := r.(*conveyErr); ok { + if cp.fmt != f { + t.Error("Incorrect panic message.") + } + } else { + t.Errorf("Incorrect panic type. 
%s", reflect.TypeOf(r)) + } + } else { + t.Error("Expected panic but none occured") + } + return r +} + +func TestMissingTopLevelGoTestReferenceCausesPanic(t *testing.T) { + output := map[string]bool{} + + defer expectEqual(t, false, output["good"]) + defer expectPanic(t, missingGoTest) + + Convey("Hi", func() { + output["bad"] = true // this shouldn't happen + }) +} + +func TestMissingTopLevelGoTestReferenceAfterGoodExample(t *testing.T) { + output := map[string]bool{} + + defer func() { + expectEqual(t, true, output["good"]) + expectEqual(t, false, output["bad"]) + }() + defer expectPanic(t, missingGoTest) + + Convey("Good example", t, func() { + output["good"] = true + }) + + Convey("Bad example", func() { + output["bad"] = true // shouldn't happen + }) +} + +func TestExtraReferencePanics(t *testing.T) { + output := map[string]bool{} + + defer expectEqual(t, false, output["bad"]) + defer expectPanic(t, extraGoTest) + + Convey("Good example", t, func() { + Convey("Bad example - passing in *testing.T a second time!", t, func() { + output["bad"] = true // shouldn't happen + }) + }) +} + +func TestParseRegistrationMissingRequiredElements(t *testing.T) { + defer expectPanic(t, parseError) + + Convey() +} + +func TestParseRegistration_MissingNameString(t *testing.T) { + defer expectPanic(t, parseError) + + Convey(func() {}) +} + +func TestParseRegistration_MissingActionFunc(t *testing.T) { + defer expectPanic(t, parseError) + + Convey("Hi there", 12345) +} + +func TestFailureModeNoContext(t *testing.T) { + Convey("Foo", t, func() { + done := make(chan int, 1) + go func() { + defer func() { done <- 1 }() + defer expectPanic(t, noStackContext) + So(len("I have no context"), ShouldBeGreaterThan, 0) + }() + <-done + }) +} + +func TestFailureModeDuplicateSuite(t *testing.T) { + Convey("cool", t, func() { + defer expectPanic(t, multipleIdenticalConvey) + + Convey("dup", nil) + Convey("dup", nil) + }) +} + +func TestFailureModeIndeterminentSuiteNames(t *testing.T) { + defer expectPanic(t, differentConveySituations) + + name := "bob" + Convey("cool", t, func() { + for i := 0; i < 3; i++ { + Convey(name, func() {}) + name += "bob" + } + }) +} + +func TestFailureModeNestedIndeterminentSuiteNames(t *testing.T) { + defer expectPanic(t, differentConveySituations) + + name := "bob" + Convey("cool", t, func() { + Convey("inner", func() { + for i := 0; i < 3; i++ { + Convey(name, func() {}) + name += "bob" + } + }) + }) +} + +func TestFailureModeParameterButMissing(t *testing.T) { + defer expectPanic(t, parseError) + + prepare() + + Convey("Foobar", t, FailureHalts) +} + +func TestFailureModeParameterWithAction(t *testing.T) { + prepare() + + Convey("Foobar", t, FailureHalts, func() {}) +} + +func TestExtraConveyParameters(t *testing.T) { + defer expectPanic(t, parseError) + + prepare() + + Convey("Foobar", t, FailureHalts, func() {}, "This is not supposed to be here") +} + +func TestExtraConveyParameters2(t *testing.T) { + defer expectPanic(t, parseError) + + prepare() + + Convey("Foobar", t, func() {}, "This is not supposed to be here") +} + +func TestExtraConveyParameters3(t *testing.T) { + defer expectPanic(t, parseError) + + output := prepare() + + Convey("A", t, func() { + output += "A " + + Convey("B", func() { + output += "B " + }, "This is not supposed to be here") + }) + + expectEqual(t, "A ", output) +} diff --git a/vendor/github.com/smartystreets/goconvey/dependencies.go b/vendor/github.com/smartystreets/goconvey/dependencies.go new file mode 100644 index 0000000000..0839e27fdf --- /dev/null +++ 
b/vendor/github.com/smartystreets/goconvey/dependencies.go @@ -0,0 +1,4 @@ +package main + +import _ "github.com/jtolds/gls" +import _ "github.com/smartystreets/assertions" diff --git a/vendor/github.com/smartystreets/goconvey/doc_test.go b/vendor/github.com/smartystreets/goconvey/doc_test.go new file mode 100644 index 0000000000..06ab7d0f9a --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/doc_test.go @@ -0,0 +1 @@ +package main diff --git a/vendor/github.com/smartystreets/goconvey/goconvey.go b/vendor/github.com/smartystreets/goconvey/goconvey.go new file mode 100644 index 0000000000..4d5fc0ef6d --- /dev/null +++ b/vendor/github.com/smartystreets/goconvey/goconvey.go @@ -0,0 +1,280 @@ +// This executable provides an HTTP server that watches for file system changes +// to .go files within the working directory (and all nested go packages). +// Navigating to the configured host and port in a web browser will display the +// latest results of running `go test` in each go package. +package main + +import ( + "flag" + "fmt" + "log" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "go/build" + + "github.com/smartystreets/goconvey/web/server/api" + "github.com/smartystreets/goconvey/web/server/contract" + "github.com/smartystreets/goconvey/web/server/executor" + "github.com/smartystreets/goconvey/web/server/messaging" + "github.com/smartystreets/goconvey/web/server/parser" + "github.com/smartystreets/goconvey/web/server/system" + "github.com/smartystreets/goconvey/web/server/watch" +) + +func init() { + flags() + folders() +} +func flags() { + flag.IntVar(&port, "port", 8080, "The port at which to serve http.") + flag.StringVar(&host, "host", "127.0.0.1", "The host at which to serve http.") + flag.DurationVar(&nap, "poll", quarterSecond, "The interval to wait between polling the file system for changes (default: 250ms).") + flag.IntVar(&packages, "packages", 10, "The number of packages to test in parallel. Higher == faster but more costly in terms of computing. (default: 10)") + flag.StringVar(&gobin, "gobin", "go", "The path to the 'go' binary (default: search on the PATH).") + flag.BoolVar(&cover, "cover", true, "Enable package-level coverage statistics. Requires Go 1.2+ and the go cover tool. (default: true)") + flag.IntVar(&depth, "depth", -1, "The directory scanning depth. If -1, scan infinitely deep directory structures. 0: scan working directory. 1+: Scan into nested directories, limited to value. 
(default: -1)") + flag.StringVar(&timeout, "timeout", "0", "The test execution timeout if none is specified in the *.goconvey file (default is '0', which is the same as not providing this option).") + flag.StringVar(&watchedSuffixes, "watchedSuffixes", ".go", "A comma separated list of file suffixes to watch for modifications (default: .go).") + flag.StringVar(&excludedDirs, "excludedDirs", "vendor,node_modules", "A comma separated list of directories that will be excluded from being watched") + flag.StringVar(&workDir, "workDir", "", "set goconvey working directory (default current directory)") + + log.SetOutput(os.Stdout) + log.SetFlags(log.LstdFlags | log.Lshortfile) +} +func folders() { + _, file, _, _ := runtime.Caller(0) + here := filepath.Dir(file) + static = filepath.Join(here, "/web/client") + reports = filepath.Join(static, "reports") +} + +func main() { + flag.Parse() + log.Printf(initialConfiguration, host, port, nap, cover) + + working := getWorkDir() + cover = coverageEnabled(cover, reports) + shell := system.NewShell(gobin, reports, cover, timeout) + + watcherInput := make(chan messaging.WatcherCommand) + watcherOutput := make(chan messaging.Folders) + excludedDirItems := strings.Split(excludedDirs, `,`) + watcher := watch.NewWatcher(working, depth, nap, watcherInput, watcherOutput, watchedSuffixes, excludedDirItems) + + parser := parser.NewParser(parser.ParsePackageResults) + tester := executor.NewConcurrentTester(shell) + tester.SetBatchSize(packages) + + longpollChan := make(chan chan string) + executor := executor.NewExecutor(tester, parser, longpollChan) + server := api.NewHTTPServer(working, watcherInput, executor, longpollChan) + go runTestOnUpdates(watcherOutput, executor, server) + go watcher.Listen() + go launchBrowser(host, port) + serveHTTP(server) +} + +func browserCmd() (string, bool) { + browser := map[string]string{ + "darwin": "open", + "linux": "xdg-open", + "win32": "start", + } + cmd, ok := browser[runtime.GOOS] + return cmd, ok +} + +func launchBrowser(host string, port int) { + browser, ok := browserCmd() + if !ok { + log.Printf("Skipped launching browser for this OS: %s", runtime.GOOS) + return + } + + log.Printf("Launching browser on %s:%d", host, port) + url := fmt.Sprintf("http://%s:%d", host, port) + cmd := exec.Command(browser, url) + + output, err := cmd.CombinedOutput() + if err != nil { + log.Println(err) + } + log.Println(string(output)) +} + +func runTestOnUpdates(queue chan messaging.Folders, executor contract.Executor, server contract.Server) { + for update := range queue { + log.Println("Received request from watcher to execute tests...") + packages := extractPackages(update) + output := executor.ExecuteTests(packages) + root := extractRoot(update, packages) + server.ReceiveUpdate(root, output) + } +} + +func extractPackages(folderList messaging.Folders) []*contract.Package { + packageList := []*contract.Package{} + for _, folder := range folderList { + hasImportCycle := testFilesImportTheirOwnPackage(folder.Path) + packageList = append(packageList, contract.NewPackage(folder, hasImportCycle)) + } + return packageList +} + +func extractRoot(folderList messaging.Folders, packageList []*contract.Package) string { + path := packageList[0].Path + folder := folderList[path] + return folder.Root +} + +// This method exists because of a bug in the go cover tool that +// causes an infinite loop when you try to run `go test -cover` +// on a package that has an import cycle defined in one of it's +// test files. Yuck. 
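+//
+// As a hypothetical illustration (names invented for this note), the cycle
+// this function detects looks like a test file importing its own package:
+//
+//	package widgets                  // in widgets_test.go, same package
+//	import "example.com/widgets"     // self-import => import cycle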
+func testFilesImportTheirOwnPackage(packagePath string) bool {
+	meta, err := build.ImportDir(packagePath, build.AllowBinary)
+	if err != nil {
+		return false
+	}
+
+	for _, dependency := range meta.TestImports {
+		if dependency == meta.ImportPath {
+			return true
+		}
+	}
+	return false
+}
+
+func serveHTTP(server contract.Server) {
+	serveStaticResources()
+	serveAjaxMethods(server)
+	activateServer()
+}
+
+func serveStaticResources() {
+	http.Handle("/", http.FileServer(http.Dir(static)))
+}
+
+func serveAjaxMethods(server contract.Server) {
+	http.HandleFunc("/watch", server.Watch)
+	http.HandleFunc("/ignore", server.Ignore)
+	http.HandleFunc("/reinstate", server.Reinstate)
+	http.HandleFunc("/latest", server.Results)
+	http.HandleFunc("/execute", server.Execute)
+	http.HandleFunc("/status", server.Status)
+	http.HandleFunc("/status/poll", server.LongPollStatus)
+	http.HandleFunc("/pause", server.TogglePause)
+}
+
+func activateServer() {
+	log.Printf("Serving HTTP at: http://%s:%d\n", host, port)
+	err := http.ListenAndServe(fmt.Sprintf("%s:%d", host, port), nil)
+	if err != nil {
+		log.Println(err)
+	}
+}
+
+func coverageEnabled(cover bool, reports string) bool {
+	return (cover &&
+		goVersion_1_2_orGreater() &&
+		coverToolInstalled() &&
+		ensureReportDirectoryExists(reports))
+}
+func goVersion_1_2_orGreater() bool {
+	version := runtime.Version() // 'go1.2....'
+	major, minor := version[2], version[4]
+	version_1_2 := major >= byte('1') && minor >= byte('2')
+	if !version_1_2 {
+		log.Printf(pleaseUpgradeGoVersion, version)
+		return false
+	}
+	return true
+}
+func coverToolInstalled() bool {
+	working := getWorkDir()
+	command := system.NewCommand(working, "go", "tool", "cover").Execute()
+	installed := strings.Contains(command.Output, "Usage of 'go tool cover':")
+	if !installed {
+		log.Print(coverToolMissing)
+		return false
+	}
+	return true
+}
+func ensureReportDirectoryExists(reports string) bool {
+	result, err := exists(reports)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if result {
+		return true
+	}
+
+	if err := os.Mkdir(reports, 0755); err == nil {
+		return true
+	}
+
+	log.Printf(reportDirectoryUnavailable, reports)
+	return false
+}
+func exists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+func getWorkDir() string {
+	working := ""
+	var err error
+	if workDir != "" {
+		working = workDir
+	} else {
+		working, err = os.Getwd()
+		if err != nil {
+			log.Fatal(err)
+		}
+	}
+	result, err := exists(working)
+	if err != nil {
+		log.Fatal(err)
+	}
+	if !result {
+		log.Fatalf("Path %s does not exist", working)
+	}
+	return working
+}
+
+var (
+	port            int
+	host            string
+	gobin           string
+	nap             time.Duration
+	packages        int
+	cover           bool
+	depth           int
+	timeout         string
+	watchedSuffixes string
+	excludedDirs    string
+
+	static  string
+	reports string
+
+	quarterSecond = time.Millisecond * 250
+	workDir       string
+)
+
+const (
+	initialConfiguration       = "Initial configuration: [host: %s] [port: %d] [poll: %v] [cover: %v]\n"
+	pleaseUpgradeGoVersion     = "Go version is less than 1.2 (%s), please upgrade to the latest stable version to enable coverage reporting.\n"
+	coverToolMissing           = "Go cover tool is not installed or not accessible: for Go < 1.5 run `go get golang.org/x/tools/cmd/cover`\n For >= Go 1.5 run `go install $GOROOT/src/cmd/cover`\n"
+	reportDirectoryUnavailable = "Could not find or create the coverage report directory (at: '%s'). You probably won't see any coverage statistics...\n"
+)
diff --git a/vendor/github.com/syndtr/goleveldb/.travis.yml b/vendor/github.com/syndtr/goleveldb/.travis.yml
new file mode 100644
index 0000000000..82de37735d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/.travis.yml
@@ -0,0 +1,12 @@
+language: go
+
+go:
+  - 1.4
+  - 1.5
+  - 1.6
+  - 1.7
+  - tip
+
+script:
+  - go test -timeout 1h ./...
+  - go test -timeout 30m -race -run "TestDB_(Concurrent|GoleveldbIssue74)" ./leveldb
diff --git a/vendor/github.com/syndtr/goleveldb/README.md b/vendor/github.com/syndtr/goleveldb/README.md
new file mode 100644
index 0000000000..259286f550
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/README.md
@@ -0,0 +1,105 @@
+This is an implementation of the [LevelDB key/value database](http://code.google.com/p/leveldb) in the [Go programming language](http://golang.org).
+
+[![Build Status](https://travis-ci.org/syndtr/goleveldb.png?branch=master)](https://travis-ci.org/syndtr/goleveldb)
+
+Installation
+-----------
+
+	go get github.com/syndtr/goleveldb/leveldb
+
+Requirements
+-----------
+
+* Requires `go1.4` or newer.
+
+Usage
+-----------
+
+Create or open a database:
+```go
+db, err := leveldb.OpenFile("path/to/db", nil)
+...
+defer db.Close()
+...
+```
+Read or modify the database content:
+```go
+// Remember that the contents of the returned slice should not be modified.
+data, err := db.Get([]byte("key"), nil)
+...
+err = db.Put([]byte("key"), []byte("value"), nil)
+...
+err = db.Delete([]byte("key"), nil)
+...
+```
+
+Iterate over database content:
+```go
+iter := db.NewIterator(nil, nil)
+for iter.Next() {
+	// Remember that the contents of the returned slice should not be modified, and
+	// only valid until the next call to Next.
+	key := iter.Key()
+	value := iter.Value()
+	...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Seek-then-Iterate:
+```go
+iter := db.NewIterator(nil, nil)
+for ok := iter.Seek(key); ok; ok = iter.Next() {
+	// Use key/value.
+	...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Iterate over subset of database content:
+```go
+iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+for iter.Next() {
+	// Use key/value.
+	...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Iterate over subset of database content with a particular prefix:
+```go
+iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+for iter.Next() {
+	// Use key/value.
+	...
+}
+iter.Release()
+err = iter.Error()
+...
+```
+Batch writes:
+```go
+batch := new(leveldb.Batch)
+batch.Put([]byte("foo"), []byte("value"))
+batch.Put([]byte("bar"), []byte("another value"))
+batch.Delete([]byte("baz"))
+err = db.Write(batch, nil)
+...
+```
+Use bloom filter:
+```go
+o := &opt.Options{
+	Filter: filter.NewBloomFilter(10),
+}
+db, err := leveldb.OpenFile("path/to/db", o)
+...
+defer db.Close()
+...
+```
+Documentation
+-----------
+
+You can read package documentation [here](http://godoc.org/github.com/syndtr/goleveldb).
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go
new file mode 100644
index 0000000000..ce0925e084
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch_test.go
@@ -0,0 +1,123 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +package leveldb + +import ( + "bytes" + "testing" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/memdb" +) + +type tbRec struct { + kt keyType + key, value []byte +} + +type testBatch struct { + rec []*tbRec +} + +func (p *testBatch) Put(key, value []byte) { + p.rec = append(p.rec, &tbRec{keyTypeVal, key, value}) +} + +func (p *testBatch) Delete(key []byte) { + p.rec = append(p.rec, &tbRec{keyTypeDel, key, nil}) +} + +func compareBatch(t *testing.T, b1, b2 *Batch) { + if b1.seq != b2.seq { + t.Errorf("invalid seq number want %d, got %d", b1.seq, b2.seq) + } + if b1.Len() != b2.Len() { + t.Fatalf("invalid record length want %d, got %d", b1.Len(), b2.Len()) + } + p1, p2 := new(testBatch), new(testBatch) + err := b1.Replay(p1) + if err != nil { + t.Fatal("error when replaying batch 1: ", err) + } + err = b2.Replay(p2) + if err != nil { + t.Fatal("error when replaying batch 2: ", err) + } + for i := range p1.rec { + r1, r2 := p1.rec[i], p2.rec[i] + if r1.kt != r2.kt { + t.Errorf("invalid type on record '%d' want %d, got %d", i, r1.kt, r2.kt) + } + if !bytes.Equal(r1.key, r2.key) { + t.Errorf("invalid key on record '%d' want %s, got %s", i, string(r1.key), string(r2.key)) + } + if r1.kt == keyTypeVal { + if !bytes.Equal(r1.value, r2.value) { + t.Errorf("invalid value on record '%d' want %s, got %s", i, string(r1.value), string(r2.value)) + } + } + } +} + +func TestBatch_EncodeDecode(t *testing.T) { + b1 := new(Batch) + b1.seq = 10009 + b1.Put([]byte("key1"), []byte("value1")) + b1.Put([]byte("key2"), []byte("value2")) + b1.Delete([]byte("key1")) + b1.Put([]byte("k"), []byte("")) + b1.Put([]byte("zzzzzzzzzzz"), []byte("zzzzzzzzzzzzzzzzzzzzzzzz")) + b1.Delete([]byte("key10000")) + b1.Delete([]byte("k")) + buf := b1.encode() + b2 := new(Batch) + err := b2.decode(0, buf) + if err != nil { + t.Error("error when decoding batch: ", err) + } + compareBatch(t, b1, b2) +} + +func TestBatch_Append(t *testing.T) { + b1 := new(Batch) + b1.seq = 10009 + b1.Put([]byte("key1"), []byte("value1")) + b1.Put([]byte("key2"), []byte("value2")) + b1.Delete([]byte("key1")) + b1.Put([]byte("foo"), []byte("foovalue")) + b1.Put([]byte("bar"), []byte("barvalue")) + b2a := new(Batch) + b2a.seq = 10009 + b2a.Put([]byte("key1"), []byte("value1")) + b2a.Put([]byte("key2"), []byte("value2")) + b2a.Delete([]byte("key1")) + b2b := new(Batch) + b2b.Put([]byte("foo"), []byte("foovalue")) + b2b.Put([]byte("bar"), []byte("barvalue")) + b2a.append(b2b) + compareBatch(t, b1, b2a) + if b1.size() != b2a.size() { + t.Fatalf("invalid batch size want %d, got %d", b1.size(), b2a.size()) + } +} + +func TestBatch_Size(t *testing.T) { + b := new(Batch) + for i := 0; i < 2; i++ { + b.Put([]byte("key1"), []byte("value1")) + b.Put([]byte("key2"), []byte("value2")) + b.Delete([]byte("key1")) + b.Put([]byte("foo"), []byte("foovalue")) + b.Put([]byte("bar"), []byte("barvalue")) + mem := memdb.New(&iComparer{comparer.DefaultComparer}, 0) + b.memReplay(mem) + if b.size() != mem.Size() { + t.Errorf("invalid batch size calculation, want=%d got=%d", mem.Size(), b.size()) + } + b.Reset() + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go new file mode 100644 index 0000000000..12a8496210 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/bench_test.go @@ -0,0 +1,509 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bytes" + "fmt" + "math/rand" + "os" + "path/filepath" + "runtime" + "sync/atomic" + "testing" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +func randomString(r *rand.Rand, n int) []byte { + b := new(bytes.Buffer) + for i := 0; i < n; i++ { + b.WriteByte(' ' + byte(r.Intn(95))) + } + return b.Bytes() +} + +func compressibleStr(r *rand.Rand, frac float32, n int) []byte { + nn := int(float32(n) * frac) + rb := randomString(r, nn) + b := make([]byte, 0, n+nn) + for len(b) < n { + b = append(b, rb...) + } + return b[:n] +} + +type valueGen struct { + src []byte + pos int +} + +func newValueGen(frac float32) *valueGen { + v := new(valueGen) + r := rand.New(rand.NewSource(301)) + v.src = make([]byte, 0, 1048576+100) + for len(v.src) < 1048576 { + v.src = append(v.src, compressibleStr(r, frac, 100)...) + } + return v +} + +func (v *valueGen) get(n int) []byte { + if v.pos+n > len(v.src) { + v.pos = 0 + } + v.pos += n + return v.src[v.pos-n : v.pos] +} + +var benchDB = filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbbench-%d", os.Getuid())) + +type dbBench struct { + b *testing.B + stor storage.Storage + db *DB + + o *opt.Options + ro *opt.ReadOptions + wo *opt.WriteOptions + + keys, values [][]byte +} + +func openDBBench(b *testing.B, noCompress bool) *dbBench { + _, err := os.Stat(benchDB) + if err == nil { + err = os.RemoveAll(benchDB) + if err != nil { + b.Fatal("cannot remove old db: ", err) + } + } + + p := &dbBench{ + b: b, + o: &opt.Options{}, + ro: &opt.ReadOptions{}, + wo: &opt.WriteOptions{}, + } + p.stor, err = storage.OpenFile(benchDB, false) + if err != nil { + b.Fatal("cannot open stor: ", err) + } + if noCompress { + p.o.Compression = opt.NoCompression + } + + p.db, err = Open(p.stor, p.o) + if err != nil { + b.Fatal("cannot open db: ", err) + } + + runtime.GOMAXPROCS(runtime.NumCPU()) + return p +} + +func (p *dbBench) reopen() { + p.db.Close() + var err error + p.db, err = Open(p.stor, p.o) + if err != nil { + p.b.Fatal("Reopen: got error: ", err) + } +} + +func (p *dbBench) populate(n int) { + p.keys, p.values = make([][]byte, n), make([][]byte, n) + v := newValueGen(0.5) + for i := range p.keys { + p.keys[i], p.values[i] = []byte(fmt.Sprintf("%016d", i)), v.get(100) + } +} + +func (p *dbBench) randomize() { + m := len(p.keys) + times := m * 2 + r1, r2 := rand.New(rand.NewSource(0xdeadbeef)), rand.New(rand.NewSource(0xbeefface)) + for n := 0; n < times; n++ { + i, j := r1.Int()%m, r2.Int()%m + if i == j { + continue + } + p.keys[i], p.keys[j] = p.keys[j], p.keys[i] + p.values[i], p.values[j] = p.values[j], p.values[i] + } +} + +func (p *dbBench) writes(perBatch int) { + b := p.b + db := p.db + + n := len(p.keys) + m := n / perBatch + if n%perBatch > 0 { + m++ + } + batches := make([]Batch, m) + j := 0 + for i := range batches { + first := true + for ; j < n && ((j+1)%perBatch != 0 || first); j++ { + first = false + batches[i].Put(p.keys[j], p.values[j]) + } + } + runtime.GC() + + b.ResetTimer() + b.StartTimer() + for i := range batches { + err := db.Write(&(batches[i]), p.wo) + if err != nil { + b.Fatal("write failed: ", err) + } + } + b.StopTimer() + b.SetBytes(116) +} + +func (p *dbBench) gc() { + p.keys, p.values = nil, nil + runtime.GC() +} + +func (p *dbBench) puts() { + b := p.b + db := p.db + + b.ResetTimer() + b.StartTimer() + 
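+	// Only the Put calls below are timed: ResetTimer/StartTimer exclude the
+	// setup above. SetBytes(116) matches one 16-byte "%016d" key plus a
+	// 100-byte generated value per iteration.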
for i := range p.keys { + err := db.Put(p.keys[i], p.values[i], p.wo) + if err != nil { + b.Fatal("put failed: ", err) + } + } + b.StopTimer() + b.SetBytes(116) +} + +func (p *dbBench) fill() { + b := p.b + db := p.db + + perBatch := 10000 + batch := new(Batch) + for i, n := 0, len(p.keys); i < n; { + first := true + for ; i < n && ((i+1)%perBatch != 0 || first); i++ { + first = false + batch.Put(p.keys[i], p.values[i]) + } + err := db.Write(batch, p.wo) + if err != nil { + b.Fatal("write failed: ", err) + } + batch.Reset() + } +} + +func (p *dbBench) gets() { + b := p.b + db := p.db + + b.ResetTimer() + for i := range p.keys { + _, err := db.Get(p.keys[i], p.ro) + if err != nil { + b.Error("got error: ", err) + } + } + b.StopTimer() +} + +func (p *dbBench) seeks() { + b := p.b + + iter := p.newIter() + defer iter.Release() + b.ResetTimer() + for i := range p.keys { + if !iter.Seek(p.keys[i]) { + b.Error("value not found for: ", string(p.keys[i])) + } + } + b.StopTimer() +} + +func (p *dbBench) newIter() iterator.Iterator { + iter := p.db.NewIterator(nil, p.ro) + err := iter.Error() + if err != nil { + p.b.Fatal("cannot create iterator: ", err) + } + return iter +} + +func (p *dbBench) close() { + if bp, err := p.db.GetProperty("leveldb.blockpool"); err == nil { + p.b.Log("Block pool stats: ", bp) + } + p.db.Close() + p.stor.Close() + os.RemoveAll(benchDB) + p.db = nil + p.keys = nil + p.values = nil + runtime.GC() + runtime.GOMAXPROCS(1) +} + +func BenchmarkDBWrite(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBWriteBatch(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1000) + p.close() +} + +func BenchmarkDBWriteUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBWriteBatchUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.writes(1000) + p.close() +} + +func BenchmarkDBWriteRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.randomize() + p.writes(1) + p.close() +} + +func BenchmarkDBWriteRandomSync(b *testing.B) { + p := openDBBench(b, false) + p.wo.Sync = true + p.populate(b.N) + p.writes(1) + p.close() +} + +func BenchmarkDBOverwrite(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.writes(1) + p.close() +} + +func BenchmarkDBOverwriteRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.writes(1) + p.randomize() + p.writes(1) + p.close() +} + +func BenchmarkDBPut(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.puts() + p.close() +} + +func BenchmarkDBRead(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadGC(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadUncompressed(b *testing.B) { + p := openDBBench(b, true) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadTable(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.reopen() + p.gc() + + iter := p.newIter() + b.ResetTimer() + for 
iter.Next() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadReverse(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + + iter := p.newIter() + b.ResetTimer() + iter.Last() + for iter.Prev() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBReadReverseTable(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.reopen() + p.gc() + + iter := p.newIter() + b.ResetTimer() + iter.Last() + for iter.Prev() { + } + iter.Release() + b.StopTimer() + b.SetBytes(116) + p.close() +} + +func BenchmarkDBSeek(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.seeks() + p.close() +} + +func BenchmarkDBSeekRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.randomize() + p.seeks() + p.close() +} + +func BenchmarkDBGet(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gets() + p.close() +} + +func BenchmarkDBGetRandom(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.randomize() + p.gets() + p.close() +} + +func BenchmarkDBReadConcurrent(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + for pb.Next() && iter.Next() { + } + }) +} + +func BenchmarkDBReadConcurrent2(b *testing.B) { + p := openDBBench(b, false) + p.populate(b.N) + p.fill() + p.gc() + defer p.close() + + b.ResetTimer() + b.SetBytes(116) + + var dir uint32 + b.RunParallel(func(pb *testing.PB) { + iter := p.newIter() + defer iter.Release() + if atomic.AddUint32(&dir, 1)%2 == 0 { + for pb.Next() && iter.Next() { + } + } else { + if pb.Next() && iter.Last() { + for pb.Next() && iter.Prev() { + } + } + } + }) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go new file mode 100644 index 0000000000..89aef69abb --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/bench_test.go @@ -0,0 +1,29 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package cache + +import ( + "math/rand" + "testing" + "time" +) + +func BenchmarkLRUCache(b *testing.B) { + c := NewCache(NewLRU(10000)) + + b.SetParallelism(10) + b.RunParallel(func(pb *testing.PB) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + for pb.Next() { + key := uint64(r.Intn(1000000)) + c.Get(0, key, func() (int, Value) { + return 1, key + }).Release() + } + }) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go new file mode 100644 index 0000000000..b7b1638455 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache_test.go @@ -0,0 +1,553 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
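+
+// These tests exercise the namespaced cache: reference counting, concurrent
+// Get/Release traffic, and LRU eviction and deletion behavior.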
+ +package cache + +import ( + "math/rand" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" +) + +type int32o int32 + +func (o *int32o) acquire() { + if atomic.AddInt32((*int32)(o), 1) != 1 { + panic("BUG: invalid ref") + } +} + +func (o *int32o) Release() { + if atomic.AddInt32((*int32)(o), -1) != 0 { + panic("BUG: invalid ref") + } +} + +type releaserFunc struct { + fn func() + value Value +} + +func (r releaserFunc) Release() { + if r.fn != nil { + r.fn() + } +} + +func set(c *Cache, ns, key uint64, value Value, charge int, relf func()) *Handle { + return c.Get(ns, key, func() (int, Value) { + if relf != nil { + return charge, releaserFunc{relf, value} + } + return charge, value + }) +} + +func TestCacheMap(t *testing.T) { + runtime.GOMAXPROCS(runtime.NumCPU()) + + nsx := []struct { + nobjects, nhandles, concurrent, repeat int + }{ + {10000, 400, 50, 3}, + {100000, 1000, 100, 10}, + } + + var ( + objects [][]int32o + handles [][]unsafe.Pointer + ) + + for _, x := range nsx { + objects = append(objects, make([]int32o, x.nobjects)) + handles = append(handles, make([]unsafe.Pointer, x.nhandles)) + } + + c := NewCache(nil) + + wg := new(sync.WaitGroup) + var done int32 + + for ns, x := range nsx { + for i := 0; i < x.concurrent; i++ { + wg.Add(1) + go func(ns, i, repeat int, objects []int32o, handles []unsafe.Pointer) { + defer wg.Done() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + for j := len(objects) * repeat; j >= 0; j-- { + key := uint64(r.Intn(len(objects))) + h := c.Get(uint64(ns), key, func() (int, Value) { + o := &objects[key] + o.acquire() + return 1, o + }) + if v := h.Value().(*int32o); v != &objects[key] { + t.Fatalf("#%d invalid value: want=%p got=%p", ns, &objects[key], v) + } + if objects[key] != 1 { + t.Fatalf("#%d invalid object %d: %d", ns, key, objects[key]) + } + if !atomic.CompareAndSwapPointer(&handles[r.Intn(len(handles))], nil, unsafe.Pointer(h)) { + h.Release() + } + } + }(ns, i, x.repeat, objects[ns], handles[ns]) + } + + go func(handles []unsafe.Pointer) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + + for atomic.LoadInt32(&done) == 0 { + i := r.Intn(len(handles)) + h := (*Handle)(atomic.LoadPointer(&handles[i])) + if h != nil && atomic.CompareAndSwapPointer(&handles[i], unsafe.Pointer(h), nil) { + h.Release() + } + time.Sleep(time.Millisecond) + } + }(handles[ns]) + } + + go func() { + handles := make([]*Handle, 100000) + for atomic.LoadInt32(&done) == 0 { + for i := range handles { + handles[i] = c.Get(999999999, uint64(i), func() (int, Value) { + return 1, 1 + }) + } + for _, h := range handles { + h.Release() + } + } + }() + + wg.Wait() + + atomic.StoreInt32(&done, 1) + + for _, handles0 := range handles { + for i := range handles0 { + h := (*Handle)(atomic.LoadPointer(&handles0[i])) + if h != nil && atomic.CompareAndSwapPointer(&handles0[i], unsafe.Pointer(h), nil) { + h.Release() + } + } + } + + for ns, objects0 := range objects { + for i, o := range objects0 { + if o != 0 { + t.Fatalf("invalid object #%d.%d: ref=%d", ns, i, o) + } + } + } +} + +func TestCacheMap_NodesAndSize(t *testing.T) { + c := NewCache(nil) + if c.Nodes() != 0 { + t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) + } + if c.Size() != 0 { + t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) + } + set(c, 0, 1, 1, 1, nil) + set(c, 0, 2, 2, 2, nil) + set(c, 1, 1, 3, 3, nil) + set(c, 2, 1, 4, 1, nil) + if c.Nodes() != 4 { + t.Errorf("invalid nodes counter: want=%d got=%d", 4, c.Nodes()) + } + if c.Size() != 7 { + 
t.Errorf("invalid size counter: want=%d got=%d", 4, c.Size()) + } +} + +func TestLRUCache_Capacity(t *testing.T) { + c := NewCache(NewLRU(10)) + if c.Capacity() != 10 { + t.Errorf("invalid capacity: want=%d got=%d", 10, c.Capacity()) + } + set(c, 0, 1, 1, 1, nil).Release() + set(c, 0, 2, 2, 2, nil).Release() + set(c, 1, 1, 3, 3, nil).Release() + set(c, 2, 1, 4, 1, nil).Release() + set(c, 2, 2, 5, 1, nil).Release() + set(c, 2, 3, 6, 1, nil).Release() + set(c, 2, 4, 7, 1, nil).Release() + set(c, 2, 5, 8, 1, nil).Release() + if c.Nodes() != 7 { + t.Errorf("invalid nodes counter: want=%d got=%d", 7, c.Nodes()) + } + if c.Size() != 10 { + t.Errorf("invalid size counter: want=%d got=%d", 10, c.Size()) + } + c.SetCapacity(9) + if c.Capacity() != 9 { + t.Errorf("invalid capacity: want=%d got=%d", 9, c.Capacity()) + } + if c.Nodes() != 6 { + t.Errorf("invalid nodes counter: want=%d got=%d", 6, c.Nodes()) + } + if c.Size() != 8 { + t.Errorf("invalid size counter: want=%d got=%d", 8, c.Size()) + } +} + +func TestCacheMap_NilValue(t *testing.T) { + c := NewCache(NewLRU(10)) + h := c.Get(0, 0, func() (size int, value Value) { + return 1, nil + }) + if h != nil { + t.Error("cache handle is non-nil") + } + if c.Nodes() != 0 { + t.Errorf("invalid nodes counter: want=%d got=%d", 0, c.Nodes()) + } + if c.Size() != 0 { + t.Errorf("invalid size counter: want=%d got=%d", 0, c.Size()) + } +} + +func TestLRUCache_GetLatency(t *testing.T) { + runtime.GOMAXPROCS(runtime.NumCPU()) + + const ( + concurrentSet = 30 + concurrentGet = 3 + duration = 3 * time.Second + delay = 3 * time.Millisecond + maxkey = 100000 + ) + + var ( + set, getHit, getAll int32 + getMaxLatency, getDuration int64 + ) + + c := NewCache(NewLRU(5000)) + wg := &sync.WaitGroup{} + until := time.Now().Add(duration) + for i := 0; i < concurrentSet; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for time.Now().Before(until) { + c.Get(0, uint64(r.Intn(maxkey)), func() (int, Value) { + time.Sleep(delay) + atomic.AddInt32(&set, 1) + return 1, 1 + }).Release() + } + }(i) + } + for i := 0; i < concurrentGet; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for { + mark := time.Now() + if mark.Before(until) { + h := c.Get(0, uint64(r.Intn(maxkey)), nil) + latency := int64(time.Now().Sub(mark)) + m := atomic.LoadInt64(&getMaxLatency) + if latency > m { + atomic.CompareAndSwapInt64(&getMaxLatency, m, latency) + } + atomic.AddInt64(&getDuration, latency) + if h != nil { + atomic.AddInt32(&getHit, 1) + h.Release() + } + atomic.AddInt32(&getAll, 1) + } else { + break + } + } + }(i) + } + + wg.Wait() + getAvglatency := time.Duration(getDuration) / time.Duration(getAll) + t.Logf("set=%d getHit=%d getAll=%d getMaxLatency=%v getAvgLatency=%v", + set, getHit, getAll, time.Duration(getMaxLatency), getAvglatency) + + if getAvglatency > delay/3 { + t.Errorf("get avg latency > %v: got=%v", delay/3, getAvglatency) + } +} + +func TestLRUCache_HitMiss(t *testing.T) { + cases := []struct { + key uint64 + value string + }{ + {1, "vvvvvvvvv"}, + {100, "v1"}, + {0, "v2"}, + {12346, "v3"}, + {777, "v4"}, + {999, "v5"}, + {7654, "v6"}, + {2, "v7"}, + {3, "v8"}, + {9, "v9"}, + } + + setfin := 0 + c := NewCache(NewLRU(1000)) + for i, x := range cases { + set(c, 0, x.key, x.value, len(x.value), func() { + setfin++ + }).Release() + for j, y := range cases { + h := c.Get(0, y.key, nil) + if j <= i { + // should hit + if h == nil { + t.Errorf("case '%d' 
iteration '%d' is miss", i, j) + } else { + if x := h.Value().(releaserFunc).value.(string); x != y.value { + t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) + } + } + } else { + // should miss + if h != nil { + t.Errorf("case '%d' iteration '%d' is hit , value '%s'", i, j, h.Value().(releaserFunc).value.(string)) + } + } + if h != nil { + h.Release() + } + } + } + + for i, x := range cases { + finalizerOk := false + c.Delete(0, x.key, func() { + finalizerOk = true + }) + + if !finalizerOk { + t.Errorf("case %d delete finalizer not executed", i) + } + + for j, y := range cases { + h := c.Get(0, y.key, nil) + if j > i { + // should hit + if h == nil { + t.Errorf("case '%d' iteration '%d' is miss", i, j) + } else { + if x := h.Value().(releaserFunc).value.(string); x != y.value { + t.Errorf("case '%d' iteration '%d' has invalid value got '%s', want '%s'", i, j, x, y.value) + } + } + } else { + // should miss + if h != nil { + t.Errorf("case '%d' iteration '%d' is hit, value '%s'", i, j, h.Value().(releaserFunc).value.(string)) + } + } + if h != nil { + h.Release() + } + } + } + + if setfin != len(cases) { + t.Errorf("some set finalizer may not be executed, want=%d got=%d", len(cases), setfin) + } +} + +func TestLRUCache_Eviction(t *testing.T) { + c := NewCache(NewLRU(12)) + o1 := set(c, 0, 1, 1, 1, nil) + set(c, 0, 2, 2, 1, nil).Release() + set(c, 0, 3, 3, 1, nil).Release() + set(c, 0, 4, 4, 1, nil).Release() + set(c, 0, 5, 5, 1, nil).Release() + if h := c.Get(0, 2, nil); h != nil { // 1,3,4,5,2 + h.Release() + } + set(c, 0, 9, 9, 10, nil).Release() // 5,2,9 + + for _, key := range []uint64{9, 2, 5, 1} { + h := c.Get(0, key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + o1.Release() + for _, key := range []uint64{1, 2, 5} { + h := c.Get(0, key, nil) + if h == nil { + t.Errorf("miss for key '%d'", key) + } else { + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } + for _, key := range []uint64{3, 4, 9} { + h := c.Get(0, key, nil) + if h != nil { + t.Errorf("hit for key '%d'", key) + if x := h.Value().(int); x != int(key) { + t.Errorf("invalid value for key '%d' want '%d', got '%d'", key, key, x) + } + h.Release() + } + } +} + +func TestLRUCache_Evict(t *testing.T) { + c := NewCache(NewLRU(6)) + set(c, 0, 1, 1, 1, nil).Release() + set(c, 0, 2, 2, 1, nil).Release() + set(c, 1, 1, 4, 1, nil).Release() + set(c, 1, 2, 5, 1, nil).Release() + set(c, 2, 1, 6, 1, nil).Release() + set(c, 2, 2, 7, 1, nil).Release() + + for ns := 0; ns < 3; ns++ { + for key := 1; key < 3; key++ { + if h := c.Get(uint64(ns), uint64(key), nil); h != nil { + h.Release() + } else { + t.Errorf("Cache.Get on #%d.%d return nil", ns, key) + } + } + } + + if ok := c.Evict(0, 1); !ok { + t.Error("first Cache.Evict on #0.1 return false") + } + if ok := c.Evict(0, 1); ok { + t.Error("second Cache.Evict on #0.1 return true") + } + if h := c.Get(0, 1, nil); h != nil { + t.Errorf("Cache.Get on #0.1 return non-nil: %v", h.Value()) + } + + c.EvictNS(1) + if h := c.Get(1, 1, nil); h != nil { + t.Errorf("Cache.Get on #1.1 return non-nil: %v", h.Value()) + } + if h := c.Get(1, 2, nil); h != nil { + t.Errorf("Cache.Get on #1.2 return non-nil: %v", h.Value()) + } + + c.EvictAll() + for ns := 0; ns < 3; ns++ { + for key := 1; key < 3; key++ { + 
if h := c.Get(uint64(ns), uint64(key), nil); h != nil { + t.Errorf("Cache.Get on #%d.%d return non-nil: %v", ns, key, h.Value()) + } + } + } +} + +func TestLRUCache_Delete(t *testing.T) { + delFuncCalled := 0 + delFunc := func() { + delFuncCalled++ + } + + c := NewCache(NewLRU(2)) + set(c, 0, 1, 1, 1, nil).Release() + set(c, 0, 2, 2, 1, nil).Release() + + if ok := c.Delete(0, 1, delFunc); !ok { + t.Error("Cache.Delete on #1 return false") + } + if h := c.Get(0, 1, nil); h != nil { + t.Errorf("Cache.Get on #1 return non-nil: %v", h.Value()) + } + if ok := c.Delete(0, 1, delFunc); ok { + t.Error("Cache.Delete on #1 return true") + } + + h2 := c.Get(0, 2, nil) + if h2 == nil { + t.Error("Cache.Get on #2 return nil") + } + if ok := c.Delete(0, 2, delFunc); !ok { + t.Error("(1) Cache.Delete on #2 return false") + } + if ok := c.Delete(0, 2, delFunc); !ok { + t.Error("(2) Cache.Delete on #2 return false") + } + + set(c, 0, 3, 3, 1, nil).Release() + set(c, 0, 4, 4, 1, nil).Release() + c.Get(0, 2, nil).Release() + + for key := 2; key <= 4; key++ { + if h := c.Get(0, uint64(key), nil); h != nil { + h.Release() + } else { + t.Errorf("Cache.Get on #%d return nil", key) + } + } + + h2.Release() + if h := c.Get(0, 2, nil); h != nil { + t.Errorf("Cache.Get on #2 return non-nil: %v", h.Value()) + } + + if delFuncCalled != 4 { + t.Errorf("delFunc isn't called 4 times: got=%d", delFuncCalled) + } +} + +func TestLRUCache_Close(t *testing.T) { + relFuncCalled := 0 + relFunc := func() { + relFuncCalled++ + } + delFuncCalled := 0 + delFunc := func() { + delFuncCalled++ + } + + c := NewCache(NewLRU(2)) + set(c, 0, 1, 1, 1, relFunc).Release() + set(c, 0, 2, 2, 1, relFunc).Release() + + h3 := set(c, 0, 3, 3, 1, relFunc) + if h3 == nil { + t.Error("Cache.Get on #3 return nil") + } + if ok := c.Delete(0, 3, delFunc); !ok { + t.Error("Cache.Delete on #3 return false") + } + + c.Close() + + if relFuncCalled != 3 { + t.Errorf("relFunc isn't called 3 times: got=%d", relFuncCalled) + } + if delFuncCalled != 1 { + t.Errorf("delFunc isn't called 1 times: got=%d", delFuncCalled) + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go new file mode 100644 index 0000000000..fef2026c85 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/corrupt_test.go @@ -0,0 +1,496 @@ +// Copyright (c) 2013, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
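+
+// The harness below corrupts specific on-disk files (journal, tables,
+// manifest) and asserts how Open and Recover behave against the damage.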
+ +package leveldb + +import ( + "bytes" + "fmt" + "io" + "math/rand" + "testing" + + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" +) + +const ctValSize = 1000 + +type dbCorruptHarness struct { + dbHarness +} + +func newDbCorruptHarnessWopt(t *testing.T, o *opt.Options) *dbCorruptHarness { + h := new(dbCorruptHarness) + h.init(t, o) + return h +} + +func newDbCorruptHarness(t *testing.T) *dbCorruptHarness { + return newDbCorruptHarnessWopt(t, &opt.Options{ + BlockCacheCapacity: 100, + Strict: opt.StrictJournalChecksum, + }) +} + +func (h *dbCorruptHarness) recover() { + p := &h.dbHarness + t := p.t + + var err error + p.db, err = Recover(h.stor, h.o) + if err != nil { + t.Fatal("Repair: got error: ", err) + } +} + +func (h *dbCorruptHarness) build(n int) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := 0; i < n; i++ { + batch.Reset() + batch.Put(tkey(i), tval(i, ctValSize)) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) buildShuffled(n int, rnd *rand.Rand) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := range rnd.Perm(n) { + batch.Reset() + batch.Put(tkey(i), tval(i, ctValSize)) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) deleteRand(n, max int, rnd *rand.Rand) { + p := &h.dbHarness + t := p.t + db := p.db + + batch := new(Batch) + for i := 0; i < n; i++ { + batch.Reset() + batch.Delete(tkey(rnd.Intn(max))) + err := db.Write(batch, p.wo) + if err != nil { + t.Fatal("write error: ", err) + } + } +} + +func (h *dbCorruptHarness) corrupt(ft storage.FileType, fi, offset, n int) { + p := &h.dbHarness + t := p.t + + fds, _ := p.stor.List(ft) + sortFds(fds) + if fi < 0 { + fi = len(fds) - 1 + } + if fi >= len(fds) { + t.Fatalf("no such file with type %q with index %d", ft, fi) + } + + fd := fds[fi] + r, err := h.stor.Open(fd) + if err != nil { + t.Fatal("cannot open file: ", err) + } + x, err := r.Seek(0, 2) + if err != nil { + t.Fatal("cannot query file size: ", err) + } + m := int(x) + if _, err := r.Seek(0, 0); err != nil { + t.Fatal(err) + } + + if offset < 0 { + if -offset > m { + offset = 0 + } else { + offset = m + offset + } + } + if offset > m { + offset = m + } + if offset+n > m { + n = m - offset + } + + buf := make([]byte, m) + _, err = io.ReadFull(r, buf) + if err != nil { + t.Fatal("cannot read file: ", err) + } + r.Close() + + for i := 0; i < n; i++ { + buf[offset+i] ^= 0x80 + } + + err = h.stor.Remove(fd) + if err != nil { + t.Fatal("cannot remove old file: ", err) + } + w, err := h.stor.Create(fd) + if err != nil { + t.Fatal("cannot create new file: ", err) + } + _, err = w.Write(buf) + if err != nil { + t.Fatal("cannot write new file: ", err) + } + w.Close() +} + +func (h *dbCorruptHarness) removeAll(ft storage.FileType) { + fds, err := h.stor.List(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + for _, fd := range fds { + if err := h.stor.Remove(fd); err != nil { + h.t.Error("remove file: ", err) + } + } +} + +func (h *dbCorruptHarness) forceRemoveAll(ft storage.FileType) { + fds, err := h.stor.List(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + for _, fd := range fds { + if err := h.stor.ForceRemove(fd); err != nil { + h.t.Error("remove file: ", err) + } + } +} + +func (h *dbCorruptHarness) removeOne(ft storage.FileType) { + fds, err := 
h.stor.List(ft) + if err != nil { + h.t.Fatal("get files: ", err) + } + fd := fds[rand.Intn(len(fds))] + h.t.Logf("removing file @%d", fd.Num) + if err := h.stor.Remove(fd); err != nil { + h.t.Error("remove file: ", err) + } +} + +func (h *dbCorruptHarness) check(min, max int) { + p := &h.dbHarness + t := p.t + db := p.db + + var n, badk, badv, missed, good int + iter := db.NewIterator(nil, p.ro) + for iter.Next() { + k := 0 + fmt.Sscanf(string(iter.Key()), "%d", &k) + if k < n { + badk++ + continue + } + missed += k - n + n = k + 1 + if !bytes.Equal(iter.Value(), tval(k, ctValSize)) { + badv++ + } else { + good++ + } + } + err := iter.Error() + iter.Release() + t.Logf("want=%d..%d got=%d badkeys=%d badvalues=%d missed=%d, err=%v", + min, max, good, badk, badv, missed, err) + if good < min || good > max { + t.Errorf("good entries number not in range") + } +} + +func TestCorruptDB_Journal(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.build(100) + h.check(100, 100) + h.closeDB() + h.corrupt(storage.TypeJournal, -1, 19, 1) + h.corrupt(storage.TypeJournal, -1, 32*1024+1000, 1) + + h.openDB() + h.check(36, 36) +} + +func TestCorruptDB_Table(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.build(100) + h.compactMem() + h.compactRangeAt(0, "", "") + h.compactRangeAt(1, "", "") + h.closeDB() + h.corrupt(storage.TypeTable, -1, 100, 1) + + h.openDB() + h.check(99, 99) +} + +func TestCorruptDB_TableIndex(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.build(10000) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, -1, -2000, 500) + + h.openDB() + h.check(5000, 9999) +} + +func TestCorruptDB_MissingManifest(t *testing.T) { + rnd := rand.New(rand.NewSource(0x0badda7a)) + h := newDbCorruptHarnessWopt(t, &opt.Options{ + BlockCacheCapacity: 100, + Strict: opt.StrictJournalChecksum, + WriteBuffer: 1000 * 60, + }) + defer h.close() + + h.build(1000) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.deleteRand(500, 1000, rnd) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.deleteRand(500, 1000, rnd) + h.compactMem() + h.buildShuffled(1000, rnd) + h.compactMem() + h.closeDB() + + h.forceRemoveAll(storage.TypeManifest) + h.openAssert(false) + + h.recover() + h.check(1000, 1000) + h.build(1000) + h.compactMem() + h.compactRange("", "") + h.closeDB() + + h.recover() + h.check(1000, 1000) +} + +func TestCorruptDB_SequenceNumberRecovery(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.put("foo", "v1") + h.put("foo", "v2") + h.put("foo", "v3") + h.put("foo", "v4") + h.put("foo", "v5") + h.closeDB() + + h.recover() + h.getVal("foo", "v5") + h.put("foo", "v6") + h.getVal("foo", "v6") + + h.reopenDB() + h.getVal("foo", "v6") +} + +func TestCorruptDB_SequenceNumberRecoveryTable(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.put("foo", "v1") + h.put("foo", "v2") + h.put("foo", "v3") + h.compactMem() + h.put("foo", "v4") + h.put("foo", "v5") + h.compactMem() + h.closeDB() + + h.recover() + h.getVal("foo", "v5") + h.put("foo", "v6") + h.getVal("foo", "v6") + + h.reopenDB() + h.getVal("foo", "v6") +} + +func TestCorruptDB_CorruptedManifest(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.put("foo", "hello") + h.compactMem() + h.compactRange("", "") + h.closeDB() + h.corrupt(storage.TypeManifest, -1, 0, 1000) + h.openAssert(false) + + h.recover() + h.getVal("foo", "hello") +} + +func TestCorruptDB_CompactionInputError(t *testing.T) { + h := 
newDbCorruptHarness(t) + defer h.close() + + h.build(10) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, -1, 100, 1) + + h.openDB() + h.check(9, 9) + + h.build(10000) + h.check(10000, 10000) +} + +func TestCorruptDB_UnrelatedKeys(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.build(10) + h.compactMem() + h.closeDB() + h.corrupt(storage.TypeTable, -1, 100, 1) + + h.openDB() + h.put(string(tkey(1000)), string(tval(1000, ctValSize))) + h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) + h.compactMem() + h.getVal(string(tkey(1000)), string(tval(1000, ctValSize))) +} + +func TestCorruptDB_Level0NewerFileHasOlderSeqnum(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("a", "v2") + h.put("b", "v2") + h.compactMem() + h.put("a", "v3") + h.put("b", "v3") + h.compactMem() + h.put("c", "v0") + h.put("d", "v0") + h.compactMem() + h.compactRangeAt(1, "", "") + h.closeDB() + + h.recover() + h.getVal("a", "v3") + h.getVal("b", "v3") + h.getVal("c", "v0") + h.getVal("d", "v0") +} + +func TestCorruptDB_RecoverInvalidSeq_Issue53(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("a", "v2") + h.put("b", "v2") + h.compactMem() + h.put("a", "v3") + h.put("b", "v3") + h.compactMem() + h.put("c", "v0") + h.put("d", "v0") + h.compactMem() + h.compactRangeAt(0, "", "") + h.closeDB() + + h.recover() + h.getVal("a", "v3") + h.getVal("b", "v3") + h.getVal("c", "v0") + h.getVal("d", "v0") +} + +func TestCorruptDB_MissingTableFiles(t *testing.T) { + h := newDbCorruptHarness(t) + defer h.close() + + h.put("a", "v1") + h.put("b", "v1") + h.compactMem() + h.put("c", "v2") + h.put("d", "v2") + h.compactMem() + h.put("e", "v3") + h.put("f", "v3") + h.closeDB() + + h.removeOne(storage.TypeTable) + h.openAssert(false) +} + +func TestCorruptDB_RecoverTable(t *testing.T) { + h := newDbCorruptHarnessWopt(t, &opt.Options{ + WriteBuffer: 112 * opt.KiB, + CompactionTableSize: 90 * opt.KiB, + Filter: filter.NewBloomFilter(10), + }) + defer h.close() + + h.build(1000) + h.compactMem() + h.compactRangeAt(0, "", "") + h.compactRangeAt(1, "", "") + seq := h.db.seq + h.closeDB() + h.corrupt(storage.TypeTable, 0, 1000, 1) + h.corrupt(storage.TypeTable, 3, 10000, 1) + // Corrupted filter shouldn't affect recovery. + h.corrupt(storage.TypeTable, 3, 113888, 10) + h.corrupt(storage.TypeTable, -1, 20000, 1) + + h.recover() + if h.db.seq != seq { + t.Errorf("invalid seq, want=%d got=%d", seq, h.db.seq) + } + h.check(985, 985) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_test.go new file mode 100644 index 0000000000..6eaf0d558e --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_test.go @@ -0,0 +1,2907 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
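+
+// dbHarness drives the general DB tests that follow: reads and writes,
+// snapshots, iterators, compaction and crash recovery.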
+ +package leveldb + +import ( + "bytes" + "container/list" + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + "unsafe" + + "github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/errors" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func tkey(i int) []byte { + return []byte(fmt.Sprintf("%016d", i)) +} + +func tval(seed, n int) []byte { + r := rand.New(rand.NewSource(int64(seed))) + return randomString(r, n) +} + +func testingLogger(t *testing.T) func(log string) { + return func(log string) { + t.Log(log) + } +} + +func testingPreserveOnFailed(t *testing.T) func() (preserve bool, err error) { + return func() (preserve bool, err error) { + preserve = t.Failed() + return + } +} + +type dbHarness struct { + t *testing.T + + stor *testutil.Storage + db *DB + o *opt.Options + ro *opt.ReadOptions + wo *opt.WriteOptions +} + +func newDbHarnessWopt(t *testing.T, o *opt.Options) *dbHarness { + h := new(dbHarness) + h.init(t, o) + return h +} + +func newDbHarness(t *testing.T) *dbHarness { + return newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true}) +} + +func (h *dbHarness) init(t *testing.T, o *opt.Options) { + gomega.RegisterTestingT(t) + h.t = t + h.stor = testutil.NewStorage() + h.stor.OnLog(testingLogger(t)) + h.stor.OnClose(testingPreserveOnFailed(t)) + h.o = o + h.ro = nil + h.wo = nil + + if err := h.openDB0(); err != nil { + // So that it will come after fatal message. 
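+ // (t.Fatal exits the goroutine via runtime.Goexit, which still runs
+ // deferred calls, so the storage is closed and whatever Close reports
+ // is logged after the fatal message.)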
+ defer h.stor.Close() + h.t.Fatal("Open (init): got error: ", err) + } +} + +func (h *dbHarness) openDB0() (err error) { + h.t.Log("opening DB") + h.db, err = Open(h.stor, h.o) + return +} + +func (h *dbHarness) openDB() { + if err := h.openDB0(); err != nil { + h.t.Fatal("Open: got error: ", err) + } +} + +func (h *dbHarness) closeDB0() error { + h.t.Log("closing DB") + return h.db.Close() +} + +func (h *dbHarness) closeDB() { + if h.db != nil { + if err := h.closeDB0(); err != nil { + h.t.Error("Close: got error: ", err) + } + } + h.stor.CloseCheck() + runtime.GC() +} + +func (h *dbHarness) reopenDB() { + if h.db != nil { + h.closeDB() + } + h.openDB() +} + +func (h *dbHarness) close() { + if h.db != nil { + h.closeDB0() + h.db = nil + } + h.stor.Close() + h.stor = nil + runtime.GC() +} + +func (h *dbHarness) openAssert(want bool) { + db, err := Open(h.stor, h.o) + if err != nil { + if want { + h.t.Error("Open: assert: got error: ", err) + } else { + h.t.Log("Open: assert: got error (expected): ", err) + } + } else { + if !want { + h.t.Error("Open: assert: expect error") + } + db.Close() + } +} + +func (h *dbHarness) write(batch *Batch) { + if err := h.db.Write(batch, h.wo); err != nil { + h.t.Error("Write: got error: ", err) + } +} + +func (h *dbHarness) put(key, value string) { + if err := h.db.Put([]byte(key), []byte(value), h.wo); err != nil { + h.t.Error("Put: got error: ", err) + } +} + +func (h *dbHarness) putMulti(n int, low, hi string) { + for i := 0; i < n; i++ { + h.put(low, "begin") + h.put(hi, "end") + h.compactMem() + } +} + +func (h *dbHarness) maxNextLevelOverlappingBytes(want int64) { + t := h.t + db := h.db + + var ( + maxOverlaps int64 + maxLevel int + ) + v := db.s.version() + if len(v.levels) > 2 { + for i, tt := range v.levels[1 : len(v.levels)-1] { + level := i + 1 + next := v.levels[level+1] + for _, t := range tt { + r := next.getOverlaps(nil, db.s.icmp, t.imin.ukey(), t.imax.ukey(), false) + sum := r.size() + if sum > maxOverlaps { + maxOverlaps = sum + maxLevel = level + } + } + } + } + v.release() + + if maxOverlaps > want { + t.Errorf("next level most overlapping bytes is more than %d, got=%d level=%d", want, maxOverlaps, maxLevel) + } else { + t.Logf("next level most overlapping bytes is %d, level=%d want=%d", maxOverlaps, maxLevel, want) + } +} + +func (h *dbHarness) delete(key string) { + t := h.t + db := h.db + + err := db.Delete([]byte(key), h.wo) + if err != nil { + t.Error("Delete: got error: ", err) + } +} + +func (h *dbHarness) assertNumKeys(want int) { + iter := h.db.NewIterator(nil, h.ro) + defer iter.Release() + got := 0 + for iter.Next() { + got++ + } + if err := iter.Error(); err != nil { + h.t.Error("assertNumKeys: ", err) + } + if want != got { + h.t.Errorf("assertNumKeys: want=%d got=%d", want, got) + } +} + +func (h *dbHarness) getr(db Reader, key string, expectFound bool) (found bool, v []byte) { + t := h.t + v, err := db.Get([]byte(key), h.ro) + switch err { + case ErrNotFound: + if expectFound { + t.Errorf("Get: key '%s' not found, want found", key) + } + case nil: + found = true + if !expectFound { + t.Errorf("Get: key '%s' found, want not found", key) + } + default: + t.Error("Get: got error: ", err) + } + return +} + +func (h *dbHarness) get(key string, expectFound bool) (found bool, v []byte) { + return h.getr(h.db, key, expectFound) +} + +func (h *dbHarness) getValr(db Reader, key, value string) { + t := h.t + found, r := h.getr(db, key, true) + if !found { + return + } + rval := string(r) + if rval != value { + t.Errorf("Get: 
invalid value, got '%s', want '%s'", rval, value) + } +} + +func (h *dbHarness) getVal(key, value string) { + h.getValr(h.db, key, value) +} + +func (h *dbHarness) allEntriesFor(key, want string) { + t := h.t + db := h.db + s := db.s + + ikey := makeInternalKey(nil, []byte(key), keyMaxSeq, keyTypeVal) + iter := db.newRawIterator(nil, nil, nil, nil) + if !iter.Seek(ikey) && iter.Error() != nil { + t.Error("AllEntries: error during seek, err: ", iter.Error()) + return + } + res := "[ " + first := true + for iter.Valid() { + if ukey, _, kt, kerr := parseInternalKey(iter.Key()); kerr == nil { + if s.icmp.uCompare(ikey.ukey(), ukey) != 0 { + break + } + if !first { + res += ", " + } + first = false + switch kt { + case keyTypeVal: + res += string(iter.Value()) + case keyTypeDel: + res += "DEL" + } + } else { + if !first { + res += ", " + } + first = false + res += "CORRUPTED" + } + iter.Next() + } + if !first { + res += " " + } + res += "]" + if res != want { + t.Errorf("AllEntries: assert failed for key %q, got=%q want=%q", key, res, want) + } +} + +// Return a string that contains all key,value pairs in order, +// formatted like "(k1->v1)(k2->v2)". +func (h *dbHarness) getKeyVal(want string) { + t := h.t + db := h.db + + s, err := db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + res := "" + iter := s.NewIterator(nil, nil) + for iter.Next() { + res += fmt.Sprintf("(%s->%s)", string(iter.Key()), string(iter.Value())) + } + iter.Release() + + if res != want { + t.Errorf("GetKeyVal: invalid key/value pair, got=%q want=%q", res, want) + } + s.Release() +} + +func (h *dbHarness) waitCompaction() { + t := h.t + db := h.db + if err := db.compTriggerWait(db.tcompCmdC); err != nil { + t.Error("compaction error: ", err) + } +} + +func (h *dbHarness) waitMemCompaction() { + t := h.t + db := h.db + + if err := db.compTriggerWait(db.mcompCmdC); err != nil { + t.Error("compaction error: ", err) + } +} + +func (h *dbHarness) compactMem() { + t := h.t + db := h.db + + t.Log("starting memdb compaction") + + db.writeLockC <- struct{}{} + defer func() { + <-db.writeLockC + }() + + if _, err := db.rotateMem(0, true); err != nil { + t.Error("compaction error: ", err) + } + + if h.totalTables() == 0 { + t.Error("zero tables after mem compaction") + } + + t.Log("memdb compaction done") +} + +func (h *dbHarness) compactRangeAtErr(level int, min, max string, wanterr bool) { + t := h.t + db := h.db + + var _min, _max []byte + if min != "" { + _min = []byte(min) + } + if max != "" { + _max = []byte(max) + } + + t.Logf("starting table range compaction: level=%d, min=%q, max=%q", level, min, max) + + if err := db.compTriggerRange(db.tcompCmdC, level, _min, _max); err != nil { + if wanterr { + t.Log("CompactRangeAt: got error (expected): ", err) + } else { + t.Error("CompactRangeAt: got error: ", err) + } + } else if wanterr { + t.Error("CompactRangeAt: expect error") + } + + t.Log("table range compaction done") +} + +func (h *dbHarness) compactRangeAt(level int, min, max string) { + h.compactRangeAtErr(level, min, max, false) +} + +func (h *dbHarness) compactRange(min, max string) { + t := h.t + db := h.db + + t.Logf("starting DB range compaction: min=%q, max=%q", min, max) + + var r util.Range + if min != "" { + r.Start = []byte(min) + } + if max != "" { + r.Limit = []byte(max) + } + if err := db.CompactRange(r); err != nil { + t.Error("CompactRange: got error: ", err) + } + + t.Log("DB range compaction done") +} + +func (h *dbHarness) sizeOf(start, limit string) int64 { + sz, err := 
h.db.SizeOf([]util.Range{ + {[]byte(start), []byte(limit)}, + }) + if err != nil { + h.t.Error("SizeOf: got error: ", err) + } + return sz.Sum() +} + +func (h *dbHarness) sizeAssert(start, limit string, low, hi int64) { + sz := h.sizeOf(start, limit) + if sz < low || sz > hi { + h.t.Errorf("sizeOf %q to %q not in range, want %d - %d, got %d", + shorten(start), shorten(limit), low, hi, sz) + } +} + +func (h *dbHarness) getSnapshot() (s *Snapshot) { + s, err := h.db.GetSnapshot() + if err != nil { + h.t.Fatal("GetSnapshot: got error: ", err) + } + return +} + +func (h *dbHarness) getTablesPerLevel() string { + res := "" + nz := 0 + v := h.db.s.version() + for level, tables := range v.levels { + if level > 0 { + res += "," + } + res += fmt.Sprint(len(tables)) + if len(tables) > 0 { + nz = len(res) + } + } + v.release() + return res[:nz] +} + +func (h *dbHarness) tablesPerLevel(want string) { + res := h.getTablesPerLevel() + if res != want { + h.t.Errorf("invalid tables len, want=%s, got=%s", want, res) + } +} + +func (h *dbHarness) totalTables() (n int) { + v := h.db.s.version() + for _, tables := range v.levels { + n += len(tables) + } + v.release() + return +} + +type keyValue interface { + Key() []byte + Value() []byte +} + +func testKeyVal(t *testing.T, kv keyValue, want string) { + res := string(kv.Key()) + "->" + string(kv.Value()) + if res != want { + t.Errorf("invalid key/value, want=%q, got=%q", want, res) + } +} + +func numKey(num int) string { + return fmt.Sprintf("key%06d", num) +} + +var testingBloomFilter = filter.NewBloomFilter(10) + +func truno(t *testing.T, o *opt.Options, f func(h *dbHarness)) { + for i := 0; i < 4; i++ { + func() { + switch i { + case 0: + case 1: + if o == nil { + o = &opt.Options{ + DisableLargeBatchTransaction: true, + Filter: testingBloomFilter, + } + } else { + old := o + o = &opt.Options{} + *o = *old + o.Filter = testingBloomFilter + } + case 2: + if o == nil { + o = &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + } + } else { + old := o + o = &opt.Options{} + *o = *old + o.Compression = opt.NoCompression + } + } + h := newDbHarnessWopt(t, o) + defer h.close() + switch i { + case 3: + h.reopenDB() + } + f(h) + }() + } +} + +func trun(t *testing.T, f func(h *dbHarness)) { + truno(t, nil, f) +} + +func testAligned(t *testing.T, name string, offset uintptr) { + if offset%8 != 0 { + t.Errorf("field %s offset is not 64-bit aligned", name) + } +} + +func Test_FieldsAligned(t *testing.T) { + p1 := new(DB) + testAligned(t, "DB.seq", unsafe.Offsetof(p1.seq)) + p2 := new(session) + testAligned(t, "session.stNextFileNum", unsafe.Offsetof(p2.stNextFileNum)) + testAligned(t, "session.stJournalNum", unsafe.Offsetof(p2.stJournalNum)) + testAligned(t, "session.stPrevJournalNum", unsafe.Offsetof(p2.stPrevJournalNum)) + testAligned(t, "session.stSeqNum", unsafe.Offsetof(p2.stSeqNum)) +} + +func TestDB_Locking(t *testing.T) { + h := newDbHarness(t) + defer h.stor.Close() + h.openAssert(false) + h.closeDB() + h.openAssert(true) +} + +func TestDB_Empty(t *testing.T) { + trun(t, func(h *dbHarness) { + h.get("foo", false) + + h.reopenDB() + h.get("foo", false) + }) +} + +func TestDB_ReadWrite(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.getVal("foo", "v1") + h.put("bar", "v2") + h.put("foo", "v3") + h.getVal("foo", "v3") + h.getVal("bar", "v2") + + h.reopenDB() + h.getVal("foo", "v3") + h.getVal("bar", "v2") + }) +} + +func TestDB_PutDeleteGet(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", 
"v1") + h.getVal("foo", "v1") + h.put("foo", "v2") + h.getVal("foo", "v2") + h.delete("foo") + h.get("foo", false) + + h.reopenDB() + h.get("foo", false) + }) +} + +func TestDB_EmptyBatch(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.get("foo", false) + err := h.db.Write(new(Batch), h.wo) + if err != nil { + t.Error("writing empty batch yield error: ", err) + } + h.get("foo", false) +} + +func TestDB_GetFromFrozen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 100100, + }) + defer h.close() + + h.put("foo", "v1") + h.getVal("foo", "v1") + + h.stor.Stall(testutil.ModeSync, storage.TypeTable) // Block sync calls + h.put("k1", strings.Repeat("x", 100000)) // Fill memtable + h.put("k2", strings.Repeat("y", 100000)) // Trigger compaction + for i := 0; h.db.getFrozenMem() == nil && i < 100; i++ { + time.Sleep(10 * time.Microsecond) + } + if h.db.getFrozenMem() == nil { + h.stor.Release(testutil.ModeSync, storage.TypeTable) + t.Fatal("No frozen mem") + } + h.getVal("foo", "v1") + h.stor.Release(testutil.ModeSync, storage.TypeTable) // Release sync calls + + h.reopenDB() + h.getVal("foo", "v1") + h.get("k1", true) + h.get("k2", true) +} + +func TestDB_GetFromTable(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.compactMem() + h.getVal("foo", "v1") + }) +} + +func TestDB_GetSnapshot(t *testing.T) { + trun(t, func(h *dbHarness) { + bar := strings.Repeat("b", 200) + h.put("foo", "v1") + h.put(bar, "v1") + + snap, err := h.db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + + h.put("foo", "v2") + h.put(bar, "v2") + + h.getVal("foo", "v2") + h.getVal(bar, "v2") + h.getValr(snap, "foo", "v1") + h.getValr(snap, bar, "v1") + + h.compactMem() + + h.getVal("foo", "v2") + h.getVal(bar, "v2") + h.getValr(snap, "foo", "v1") + h.getValr(snap, bar, "v1") + + snap.Release() + + h.reopenDB() + h.getVal("foo", "v2") + h.getVal(bar, "v2") + }) +} + +func TestDB_GetLevel0Ordering(t *testing.T) { + trun(t, func(h *dbHarness) { + h.db.memdbMaxLevel = 2 + + for i := 0; i < 4; i++ { + h.put("bar", fmt.Sprintf("b%d", i)) + h.put("foo", fmt.Sprintf("v%d", i)) + h.compactMem() + } + h.getVal("foo", "v3") + h.getVal("bar", "b3") + + v := h.db.s.version() + t0len := v.tLen(0) + v.release() + if t0len < 2 { + t.Errorf("level-0 tables is less than 2, got %d", t0len) + } + + h.reopenDB() + h.getVal("foo", "v3") + h.getVal("bar", "b3") + }) +} + +func TestDB_GetOrderedByLevels(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.compactMem() + h.compactRange("a", "z") + h.getVal("foo", "v1") + h.put("foo", "v2") + h.compactMem() + h.getVal("foo", "v2") + }) +} + +func TestDB_GetPicksCorrectFile(t *testing.T) { + trun(t, func(h *dbHarness) { + // Arrange to have multiple files in a non-level-0 level. + h.put("a", "va") + h.compactMem() + h.compactRange("a", "b") + h.put("x", "vx") + h.compactMem() + h.compactRange("x", "y") + h.put("f", "vf") + h.compactMem() + h.compactRange("f", "g") + + h.getVal("a", "va") + h.getVal("f", "vf") + h.getVal("x", "vx") + + h.compactRange("", "") + h.getVal("a", "va") + h.getVal("f", "vf") + h.getVal("x", "vx") + }) +} + +func TestDB_GetEncountersEmptyLevel(t *testing.T) { + trun(t, func(h *dbHarness) { + h.db.memdbMaxLevel = 2 + + // Arrange for the following to happen: + // * sstable A in level 0 + // * nothing in level 1 + // * sstable B in level 2 + // Then do enough Get() calls to arrange for an automatic compaction + // of sstable A. 
A bug would cause the compaction to be marked as + // occuring at level 1 (instead of the correct level 0). + + // Step 1: First place sstables in levels 0 and 2 + for i := 0; ; i++ { + if i >= 100 { + t.Fatal("could not fill levels-0 and level-2") + } + v := h.db.s.version() + if v.tLen(0) > 0 && v.tLen(2) > 0 { + v.release() + break + } + v.release() + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + + h.getVal("a", "begin") + h.getVal("z", "end") + } + + // Step 2: clear level 1 if necessary. + h.compactRangeAt(1, "", "") + h.tablesPerLevel("1,0,1") + + h.getVal("a", "begin") + h.getVal("z", "end") + + // Step 3: read a bunch of times + for i := 0; i < 200; i++ { + h.get("missing", false) + } + + // Step 4: Wait for compaction to finish + h.waitCompaction() + + v := h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + v.release() + + h.getVal("a", "begin") + h.getVal("z", "end") + }) +} + +func TestDB_IterMultiWithDelete(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("a", "va") + h.put("b", "vb") + h.put("c", "vc") + h.delete("b") + h.get("b", false) + + iter := h.db.NewIterator(nil, nil) + iter.Seek([]byte("c")) + testKeyVal(t, iter, "c->vc") + iter.Prev() + testKeyVal(t, iter, "a->va") + iter.Release() + + h.compactMem() + + iter = h.db.NewIterator(nil, nil) + iter.Seek([]byte("c")) + testKeyVal(t, iter, "c->vc") + iter.Prev() + testKeyVal(t, iter, "a->va") + iter.Release() + }) +} + +func TestDB_IteratorPinsRef(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "hello") + + // Get iterator that will yield the current contents of the DB. + iter := h.db.NewIterator(nil, nil) + + // Write to force compactions + h.put("foo", "newvalue1") + for i := 0; i < 100; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } + h.put("foo", "newvalue2") + + iter.First() + testKeyVal(t, iter, "foo->hello") + if iter.Next() { + t.Errorf("expect eof") + } + iter.Release() +} + +func TestDB_Recover(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.put("baz", "v5") + + h.reopenDB() + h.getVal("foo", "v1") + + h.getVal("foo", "v1") + h.getVal("baz", "v5") + h.put("bar", "v2") + h.put("foo", "v3") + + h.reopenDB() + h.getVal("foo", "v3") + h.put("foo", "v4") + h.getVal("foo", "v4") + h.getVal("bar", "v2") + h.getVal("baz", "v5") + }) +} + +func TestDB_RecoverWithEmptyJournal(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + h.put("foo", "v2") + + h.reopenDB() + h.reopenDB() + h.put("foo", "v3") + + h.reopenDB() + h.getVal("foo", "v3") + }) +} + +func TestDB_RecoverDuringMemtableCompaction(t *testing.T) { + truno(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 1000000}, func(h *dbHarness) { + + h.stor.Stall(testutil.ModeSync, storage.TypeTable) + h.put("big1", strings.Repeat("x", 10000000)) + h.put("big2", strings.Repeat("y", 1000)) + h.put("bar", "v2") + h.stor.Release(testutil.ModeSync, storage.TypeTable) + + h.reopenDB() + h.getVal("bar", "v2") + h.getVal("big1", strings.Repeat("x", 10000000)) + h.getVal("big2", strings.Repeat("y", 1000)) + }) +} + +func TestDB_MinorCompactionsHappen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 10000}) + defer h.close() + + n := 500 + + key := func(i int) string { + return fmt.Sprintf("key%06d", i) + } + + for i := 0; i < n; i++ { + h.put(key(i), key(i)+strings.Repeat("v", 1000)) + } + + for i := 0; i < n; i++ { + h.getVal(key(i), 
key(i)+strings.Repeat("v", 1000)) + } + + h.reopenDB() + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)+strings.Repeat("v", 1000)) + } +} + +func TestDB_RecoverWithLargeJournal(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("big1", strings.Repeat("1", 200000)) + h.put("big2", strings.Repeat("2", 200000)) + h.put("small3", strings.Repeat("3", 10)) + h.put("small4", strings.Repeat("4", 10)) + h.tablesPerLevel("") + + // Make sure that if we re-open with a small write buffer size that + // we flush table files in the middle of a large journal file. + h.o.WriteBuffer = 100000 + h.reopenDB() + h.getVal("big1", strings.Repeat("1", 200000)) + h.getVal("big2", strings.Repeat("2", 200000)) + h.getVal("small3", strings.Repeat("3", 10)) + h.getVal("small4", strings.Repeat("4", 10)) + v := h.db.s.version() + if v.tLen(0) <= 1 { + t.Errorf("tables-0 less than one") + } + v.release() +} + +func TestDB_CompactionsGenerateMultipleFiles(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 10000000, + Compression: opt.NoCompression, + }) + defer h.close() + + v := h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + v.release() + + n := 80 + + // Write 8MB (80 values, each 100K) + for i := 0; i < n; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } + + // Reopening moves updates to level-0 + h.reopenDB() + h.compactRangeAt(0, "", "") + + v = h.db.s.version() + if v.tLen(0) > 0 { + t.Errorf("level-0 tables more than 0, got %d", v.tLen(0)) + } + if v.tLen(1) <= 1 { + t.Errorf("level-1 tables less than 1, got %d", v.tLen(1)) + } + v.release() + + for i := 0; i < n; i++ { + h.getVal(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), 100000/10)) + } +} + +func TestDB_RepeatedWritesToSameKey(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 100000}) + defer h.close() + + maxTables := h.o.GetWriteL0PauseTrigger() + 7 + + value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) + for i := 0; i < 5*maxTables; i++ { + h.put("key", value) + n := h.totalTables() + if n > maxTables { + t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) + } + } +} + +func TestDB_RepeatedWritesToSameKeyAfterReopen(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 100000, + }) + defer h.close() + + h.reopenDB() + + maxTables := h.o.GetWriteL0PauseTrigger() + 7 + + value := strings.Repeat("v", 2*h.o.GetWriteBuffer()) + for i := 0; i < 5*maxTables; i++ { + h.put("key", value) + n := h.totalTables() + if n > maxTables { + t.Errorf("total tables exceed %d, got=%d, iter=%d", maxTables, n, i) + } + } +} + +func TestDB_SparseMerge(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, Compression: opt.NoCompression}) + defer h.close() + + h.putMulti(7, "A", "Z") + + // Suppose there is: + // small amount of data with prefix A + // large amount of data with prefix B + // small amount of data with prefix C + // and that recent updates have made small changes to all three prefixes. + // Check that we do not do a compaction that merges all of B in one shot. 
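+ // The maxNextLevelOverlappingBytes(20 * 1048576) checks below assert that
+ // no single table ends up overlapping more than ~20 MiB of the next level.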
+ h.put("A", "va") + value := strings.Repeat("x", 1000) + for i := 0; i < 100000; i++ { + h.put(fmt.Sprintf("B%010d", i), value) + } + h.put("C", "vc") + h.compactMem() + h.compactRangeAt(0, "", "") + h.waitCompaction() + + // Make sparse update + h.put("A", "va2") + h.put("B100", "bvalue2") + h.put("C", "vc2") + h.compactMem() + + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) + h.compactRangeAt(0, "", "") + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) + h.compactRangeAt(1, "", "") + h.waitCompaction() + h.maxNextLevelOverlappingBytes(20 * 1048576) +} + +func TestDB_SizeOf(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + WriteBuffer: 10000000, + }) + defer h.close() + + h.sizeAssert("", "xyz", 0, 0) + h.reopenDB() + h.sizeAssert("", "xyz", 0, 0) + + // Write 8MB (80 values, each 100K) + n := 80 + s1 := 100000 + s2 := 105000 + + for i := 0; i < n; i++ { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), s1/10)) + } + + // 0 because SizeOf() does not account for memtable space + h.sizeAssert("", numKey(50), 0, 0) + + for r := 0; r < 3; r++ { + h.reopenDB() + + for cs := 0; cs < n; cs += 10 { + for i := 0; i < n; i += 10 { + h.sizeAssert("", numKey(i), int64(s1*i), int64(s2*i)) + h.sizeAssert("", numKey(i)+".suffix", int64(s1*(i+1)), int64(s2*(i+1))) + h.sizeAssert(numKey(i), numKey(i+10), int64(s1*10), int64(s2*10)) + } + + h.sizeAssert("", numKey(50), int64(s1*50), int64(s2*50)) + h.sizeAssert("", numKey(50)+".suffix", int64(s1*50), int64(s2*50)) + + h.compactRangeAt(0, numKey(cs), numKey(cs+9)) + } + + v := h.db.s.version() + if v.tLen(0) != 0 { + t.Errorf("level-0 tables was not zero, got %d", v.tLen(0)) + } + if v.tLen(1) == 0 { + t.Error("level-1 tables was zero") + } + v.release() + } +} + +func TestDB_SizeOf_MixOfSmallAndLarge(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + }) + defer h.close() + + sizes := []int64{ + 10000, + 10000, + 100000, + 10000, + 100000, + 10000, + 300000, + 10000, + } + + for i, n := range sizes { + h.put(numKey(i), strings.Repeat(fmt.Sprintf("v%09d", i), int(n)/10)) + } + + for r := 0; r < 3; r++ { + h.reopenDB() + + var x int64 + for i, n := range sizes { + y := x + if i > 0 { + y += 1000 + } + h.sizeAssert("", numKey(i), x, y) + x += n + } + + h.sizeAssert(numKey(3), numKey(5), 110000, 111000) + + h.compactRangeAt(0, "", "") + } +} + +func TestDB_Snapshot(t *testing.T) { + trun(t, func(h *dbHarness) { + h.put("foo", "v1") + s1 := h.getSnapshot() + h.put("foo", "v2") + s2 := h.getSnapshot() + h.put("foo", "v3") + s3 := h.getSnapshot() + h.put("foo", "v4") + + h.getValr(s1, "foo", "v1") + h.getValr(s2, "foo", "v2") + h.getValr(s3, "foo", "v3") + h.getVal("foo", "v4") + + s3.Release() + h.getValr(s1, "foo", "v1") + h.getValr(s2, "foo", "v2") + h.getVal("foo", "v4") + + s1.Release() + h.getValr(s2, "foo", "v2") + h.getVal("foo", "v4") + + s2.Release() + h.getVal("foo", "v4") + }) +} + +func TestDB_SnapshotList(t *testing.T) { + db := &DB{snapsList: list.New()} + e0a := db.acquireSnapshot() + e0b := db.acquireSnapshot() + db.seq = 1 + e1 := db.acquireSnapshot() + db.seq = 2 + e2 := db.acquireSnapshot() + + if db.minSeq() != 0 { + t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } + db.releaseSnapshot(e0a) + if db.minSeq() != 0 { + t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } + db.releaseSnapshot(e2) + if db.minSeq() != 0 { + 
t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } + db.releaseSnapshot(e0b) + if db.minSeq() != 1 { + t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } + e2 = db.acquireSnapshot() + if db.minSeq() != 1 { + t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } + db.releaseSnapshot(e1) + if db.minSeq() != 2 { + t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } + db.releaseSnapshot(e2) + if db.minSeq() != 2 { + t.Fatalf("invalid sequence number, got=%d", db.minSeq()) + } +} + +func TestDB_HiddenValuesAreRemoved(t *testing.T) { + trun(t, func(h *dbHarness) { + s := h.db.s + + m := 2 + h.db.memdbMaxLevel = m + + h.put("foo", "v1") + h.compactMem() + v := s.version() + num := v.tLen(m) + v.release() + if num != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, num) + } + + // Place a table at level last-1 to prevent merging with preceding mutation + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + v = s.version() + if v.tLen(m) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) + } + if v.tLen(m-1) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) + } + v.release() + + h.delete("foo") + h.put("foo", "v2") + h.allEntriesFor("foo", "[ v2, DEL, v1 ]") + h.compactMem() + h.allEntriesFor("foo", "[ v2, DEL, v1 ]") + h.compactRangeAt(m-2, "", "z") + // DEL eliminated, but v1 remains because we aren't compacting that level + // (DEL can be eliminated because v2 hides v1). + h.allEntriesFor("foo", "[ v2, v1 ]") + h.compactRangeAt(m-1, "", "") + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + h.allEntriesFor("foo", "[ v2 ]") + }) +} + +func TestDB_DeletionMarkers2(t *testing.T) { + h := newDbHarness(t) + defer h.close() + s := h.db.s + + m := 2 + h.db.memdbMaxLevel = m + + h.put("foo", "v1") + h.compactMem() + v := s.version() + num := v.tLen(m) + v.release() + if num != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, num) + } + + // Place a table at level last-1 to prevent merging with preceding mutation + h.put("a", "begin") + h.put("z", "end") + h.compactMem() + v = s.version() + if v.tLen(m) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m, v.tLen(m)) + } + if v.tLen(m-1) != 1 { + t.Errorf("invalid level-%d len, want=1 got=%d", m-1, v.tLen(m-1)) + } + v.release() + + h.delete("foo") + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactMem() // Moves to level last-2 + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactRangeAt(m-2, "", "") + // DEL kept: "last" file overlaps + h.allEntriesFor("foo", "[ DEL, v1 ]") + h.compactRangeAt(m-1, "", "") + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). 
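+ // Unlike TestDB_HiddenValuesAreRemoved there is no newer value hiding v1
+ // here, so nothing remains for "foo" at all.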
+ h.allEntriesFor("foo", "[ ]") +} + +func TestDB_CompactionTableOpenError(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + OpenFilesCacheCapacity: -1, + }) + defer h.close() + + h.db.memdbMaxLevel = 2 + + im := 10 + jm := 10 + for r := 0; r < 2; r++ { + for i := 0; i < im; i++ { + for j := 0; j < jm; j++ { + h.put(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) + } + h.compactMem() + } + } + + if n := h.totalTables(); n != im*2 { + t.Errorf("total tables is %d, want %d", n, im*2) + } + + h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, errors.New("open error during table compaction")) + go h.db.CompactRange(util.Range{}) + if err := h.db.compTriggerWait(h.db.tcompCmdC); err != nil { + t.Log("compaction error: ", err) + } + h.closeDB0() + h.openDB() + h.stor.EmulateError(testutil.ModeOpen, storage.TypeTable, nil) + + for i := 0; i < im; i++ { + for j := 0; j < jm; j++ { + h.getVal(fmt.Sprintf("k%d,%d", i, j), fmt.Sprintf("v%d,%d", i, j)) + } + } +} + +func TestDB_OverlapInLevel0(t *testing.T) { + trun(t, func(h *dbHarness) { + h.db.memdbMaxLevel = 2 + + // Fill levels 1 and 2 to disable the pushing of new memtables to levels > 0. + h.put("100", "v100") + h.put("999", "v999") + h.compactMem() + h.delete("100") + h.delete("999") + h.compactMem() + h.tablesPerLevel("0,1,1") + + // Make files spanning the following ranges in level-0: + // files[0] 200 .. 900 + // files[1] 300 .. 500 + // Note that files are sorted by min key. + h.put("300", "v300") + h.put("500", "v500") + h.compactMem() + h.put("200", "v200") + h.put("600", "v600") + h.put("900", "v900") + h.compactMem() + h.tablesPerLevel("2,1,1") + + // Compact away the placeholder files we created initially + h.compactRangeAt(1, "", "") + h.compactRangeAt(2, "", "") + h.tablesPerLevel("2") + + // Do a memtable compaction. Before bug-fix, the compaction would + // not detect the overlap with level-0 files and would incorrectly place + // the deletion in a deeper level. 
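+	// With the fix, the table produced by this memtable compaction must stay
+	// in level-0 (three level-0 tables below): if the tombstone were placed
+	// below the level-0 file that still holds "600", reads would resurrect
+	// the stale value.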
+ h.delete("600") + h.compactMem() + h.tablesPerLevel("3") + h.get("600", false) + }) +} + +func TestDB_L0_CompactionBug_Issue44_a(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.reopenDB() + h.put("b", "v") + h.reopenDB() + h.delete("b") + h.delete("a") + h.reopenDB() + h.delete("a") + h.reopenDB() + h.put("a", "v") + h.reopenDB() + h.reopenDB() + h.getKeyVal("(a->v)") + h.waitCompaction() + h.getKeyVal("(a->v)") +} + +func TestDB_L0_CompactionBug_Issue44_b(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.reopenDB() + h.put("", "") + h.reopenDB() + h.delete("e") + h.put("", "") + h.reopenDB() + h.put("c", "cv") + h.reopenDB() + h.put("", "") + h.reopenDB() + h.put("", "") + h.waitCompaction() + h.reopenDB() + h.put("d", "dv") + h.reopenDB() + h.put("", "") + h.reopenDB() + h.delete("d") + h.delete("b") + h.reopenDB() + h.getKeyVal("(->)(c->cv)") + h.waitCompaction() + h.getKeyVal("(->)(c->cv)") +} + +func TestDB_SingleEntryMemCompaction(t *testing.T) { + trun(t, func(h *dbHarness) { + for i := 0; i < 10; i++ { + h.put("big", strings.Repeat("v", opt.DefaultWriteBuffer)) + h.compactMem() + h.put("key", strings.Repeat("v", opt.DefaultBlockSize)) + h.compactMem() + h.put("k", "v") + h.compactMem() + h.put("", "") + h.compactMem() + h.put("verybig", strings.Repeat("v", opt.DefaultWriteBuffer*2)) + h.compactMem() + } + }) +} + +func TestDB_ManifestWriteError(t *testing.T) { + for i := 0; i < 2; i++ { + func() { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "bar") + h.getVal("foo", "bar") + + // Mem compaction (will succeed) + h.compactMem() + h.getVal("foo", "bar") + v := h.db.s.version() + if n := v.tLen(0); n != 1 { + t.Errorf("invalid total tables, want=1 got=%d", n) + } + v.release() + + if i == 0 { + h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, errors.New("manifest write error")) + } else { + h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, errors.New("manifest sync error")) + } + + // Merging compaction (will fail) + h.compactRangeAtErr(0, "", "", true) + + h.db.Close() + h.stor.EmulateError(testutil.ModeWrite, storage.TypeManifest, nil) + h.stor.EmulateError(testutil.ModeSync, storage.TypeManifest, nil) + + // Should not lose data + h.openDB() + h.getVal("foo", "bar") + }() + } +} + +func assertErr(t *testing.T, err error, wanterr bool) { + if err != nil { + if wanterr { + t.Log("AssertErr: got error (expected): ", err) + } else { + t.Error("AssertErr: got error: ", err) + } + } else if wanterr { + t.Error("AssertErr: expect error") + } +} + +func TestDB_ClosedIsClosed(t *testing.T) { + h := newDbHarness(t) + db := h.db + + var iter, iter2 iterator.Iterator + var snap *Snapshot + func() { + defer h.close() + + h.put("k", "v") + h.getVal("k", "v") + + iter = db.NewIterator(nil, h.ro) + iter.Seek([]byte("k")) + testKeyVal(t, iter, "k->v") + + var err error + snap, err = db.GetSnapshot() + if err != nil { + t.Fatal("GetSnapshot: got error: ", err) + } + + h.getValr(snap, "k", "v") + + iter2 = snap.NewIterator(nil, h.ro) + iter2.Seek([]byte("k")) + testKeyVal(t, iter2, "k->v") + + h.put("foo", "v2") + h.delete("foo") + + // closing DB + iter.Release() + iter2.Release() + }() + + assertErr(t, db.Put([]byte("x"), []byte("y"), h.wo), true) + _, err := db.Get([]byte("k"), h.ro) + assertErr(t, err, true) + + if iter.Valid() { + t.Errorf("iter.Valid should false") + } + assertErr(t, iter.Error(), false) + testKeyVal(t, iter, "->") + if iter.Seek([]byte("k")) { + t.Errorf("iter.Seek should false") + } + assertErr(t, iter.Error(), 
true) + + assertErr(t, iter2.Error(), false) + + _, err = snap.Get([]byte("k"), h.ro) + assertErr(t, err, true) + + _, err = db.GetSnapshot() + assertErr(t, err, true) + + iter3 := db.NewIterator(nil, h.ro) + assertErr(t, iter3.Error(), true) + + iter3 = snap.NewIterator(nil, h.ro) + assertErr(t, iter3.Error(), true) + + assertErr(t, db.Delete([]byte("k"), h.wo), true) + + _, err = db.GetProperty("leveldb.stats") + assertErr(t, err, true) + + _, err = db.SizeOf([]util.Range{{[]byte("a"), []byte("z")}}) + assertErr(t, err, true) + + assertErr(t, db.CompactRange(util.Range{}), true) + + assertErr(t, db.Close(), true) +} + +type numberComparer struct{} + +func (numberComparer) num(x []byte) (n int) { + fmt.Sscan(string(x[1:len(x)-1]), &n) + return +} + +func (numberComparer) Name() string { + return "test.NumberComparer" +} + +func (p numberComparer) Compare(a, b []byte) int { + return p.num(a) - p.num(b) +} + +func (numberComparer) Separator(dst, a, b []byte) []byte { return nil } +func (numberComparer) Successor(dst, b []byte) []byte { return nil } + +func TestDB_CustomComparer(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Comparer: numberComparer{}, + WriteBuffer: 1000, + }) + defer h.close() + + h.put("[10]", "ten") + h.put("[0x14]", "twenty") + for i := 0; i < 2; i++ { + h.getVal("[10]", "ten") + h.getVal("[0xa]", "ten") + h.getVal("[20]", "twenty") + h.getVal("[0x14]", "twenty") + h.get("[15]", false) + h.get("[0xf]", false) + h.compactMem() + h.compactRange("[0]", "[9999]") + } + + for n := 0; n < 2; n++ { + for i := 0; i < 100; i++ { + v := fmt.Sprintf("[%d]", i*10) + h.put(v, v) + } + h.compactMem() + h.compactRange("[0]", "[1000000]") + } +} + +func TestDB_ManualCompaction(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.db.memdbMaxLevel = 2 + + h.putMulti(3, "p", "q") + h.tablesPerLevel("1,1,1") + + // Compaction range falls before files + h.compactRange("", "c") + h.tablesPerLevel("1,1,1") + + // Compaction range falls after files + h.compactRange("r", "z") + h.tablesPerLevel("1,1,1") + + // Compaction range overlaps files + h.compactRange("p1", "p9") + h.tablesPerLevel("0,0,1") + + // Populate a different range + h.putMulti(3, "c", "e") + h.tablesPerLevel("1,1,2") + + // Compact just the new range + h.compactRange("b", "f") + h.tablesPerLevel("0,0,2") + + // Compact all + h.putMulti(1, "a", "z") + h.tablesPerLevel("0,1,2") + h.compactRange("", "") + h.tablesPerLevel("0,0,1") +} + +func TestDB_BloomFilter(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + DisableBlockCache: true, + Filter: filter.NewBloomFilter(10), + }) + defer h.close() + + key := func(i int) string { + return fmt.Sprintf("key%06d", i) + } + + const n = 10000 + + // Populate multiple layers + for i := 0; i < n; i++ { + h.put(key(i), key(i)) + } + h.compactMem() + h.compactRange("a", "z") + for i := 0; i < n; i += 100 { + h.put(key(i), key(i)) + } + h.compactMem() + + // Prevent auto compactions triggered by seeks + h.stor.Stall(testutil.ModeSync, storage.TypeTable) + + // Lookup present keys. Should rarely read from small sstable. 
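+	// NewBloomFilter(10) derives roughly k = 0.69*10 ≈ 6 probes per key, for a
+	// theoretical false-positive rate of about (1-e^(-0.6))^6 ≈ 0.8%. That is
+	// why the bounds below (at most ~2% extra reads for present keys, ~3% for
+	// missing ones) are realistic rather than arbitrary.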
+ h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable) + for i := 0; i < n; i++ { + h.getVal(key(i), key(i)) + } + cnt, _ := h.stor.Counter(testutil.ModeRead, storage.TypeTable) + t.Logf("lookup of %d present keys yield %d sstable I/O reads", n, cnt) + if min, max := n, n+2*n/100; cnt < min || cnt > max { + t.Errorf("num of sstable I/O reads of present keys not in range of %d - %d, got %d", min, max, cnt) + } + + // Lookup missing keys. Should rarely read from either sstable. + h.stor.ResetCounter(testutil.ModeRead, storage.TypeTable) + for i := 0; i < n; i++ { + h.get(key(i)+".missing", false) + } + cnt, _ = h.stor.Counter(testutil.ModeRead, storage.TypeTable) + t.Logf("lookup of %d missing keys yield %d sstable I/O reads", n, cnt) + if max := 3 * n / 100; cnt > max { + t.Errorf("num of sstable I/O reads of missing keys was more than %d, got %d", max, cnt) + } + + h.stor.Release(testutil.ModeSync, storage.TypeTable) +} + +func TestDB_Concurrent(t *testing.T) { + const n, secs, maxkey = 4, 6, 1000 + h := newDbHarness(t) + defer h.close() + + runtime.GOMAXPROCS(runtime.NumCPU()) + + var ( + closeWg sync.WaitGroup + stop uint32 + cnt [n]uint32 + ) + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + var put, get, found uint + defer func() { + t.Logf("goroutine %d stopped after %d ops, put=%d get=%d found=%d missing=%d", + i, cnt[i], put, get, found, get-found) + closeWg.Done() + }() + + rnd := rand.New(rand.NewSource(int64(1000 + i))) + for atomic.LoadUint32(&stop) == 0 { + x := cnt[i] + + k := rnd.Intn(maxkey) + kstr := fmt.Sprintf("%016d", k) + + if (rnd.Int() % 2) > 0 { + put++ + h.put(kstr, fmt.Sprintf("%d.%d.%-1000d", k, i, x)) + } else { + get++ + v, err := h.db.Get([]byte(kstr), h.ro) + if err == nil { + found++ + rk, ri, rx := 0, -1, uint32(0) + fmt.Sscanf(string(v), "%d.%d.%d", &rk, &ri, &rx) + if rk != k { + t.Errorf("invalid key want=%d got=%d", k, rk) + } + if ri < 0 || ri >= n { + t.Error("invalid goroutine number: ", ri) + } else { + tx := atomic.LoadUint32(&(cnt[ri])) + if rx > tx { + t.Errorf("invalid seq number, %d > %d ", rx, tx) + } + } + } else if err != ErrNotFound { + t.Error("Get: got error: ", err) + return + } + } + atomic.AddUint32(&cnt[i], 1) + } + }(i) + } + + time.Sleep(secs * time.Second) + atomic.StoreUint32(&stop, 1) + closeWg.Wait() +} + +func TestDB_ConcurrentIterator(t *testing.T) { + const n, n2 = 4, 1000 + h := newDbHarnessWopt(t, &opt.Options{DisableLargeBatchTransaction: true, WriteBuffer: 30}) + defer h.close() + + runtime.GOMAXPROCS(runtime.NumCPU()) + + var ( + closeWg sync.WaitGroup + stop uint32 + ) + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 0; atomic.LoadUint32(&stop) == 0; k++ { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } + + for i := 0; i < n; i++ { + closeWg.Add(1) + go func(i int) { + for k := 1000000; k < 0 || atomic.LoadUint32(&stop) == 0; k-- { + h.put(fmt.Sprintf("k%d", k), fmt.Sprintf("%d.%d.", k, i)+strings.Repeat("x", 10)) + } + closeWg.Done() + }(i) + } + + cmp := comparer.DefaultComparer + for i := 0; i < n2; i++ { + closeWg.Add(1) + go func(i int) { + it := h.db.NewIterator(nil, nil) + var pk []byte + for it.Next() { + kk := it.Key() + if cmp.Compare(kk, pk) <= 0 { + t.Errorf("iter %d: %q is successor of %q", i, pk, kk) + } + pk = append(pk[:0], kk...) 
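+				// it.Key() returns a slice whose contents may be reused on the
+				// next call to Next, so the key is copied into pk (recycling
+				// pk's backing array) before comparing the next pair.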
+ var k, vk, vi int + if n, err := fmt.Sscanf(string(it.Key()), "k%d", &k); err != nil { + t.Errorf("iter %d: Scanf error on key %q: %v", i, it.Key(), err) + } else if n < 1 { + t.Errorf("iter %d: Cannot parse key %q", i, it.Key()) + } + if n, err := fmt.Sscanf(string(it.Value()), "%d.%d", &vk, &vi); err != nil { + t.Errorf("iter %d: Scanf error on value %q: %v", i, it.Value(), err) + } else if n < 2 { + t.Errorf("iter %d: Cannot parse value %q", i, it.Value()) + } + + if vk != k { + t.Errorf("iter %d: invalid value i=%d, want=%d got=%d", i, vi, k, vk) + } + } + if err := it.Error(); err != nil { + t.Errorf("iter %d: Got error: %v", i, err) + } + it.Release() + closeWg.Done() + }(i) + } + + atomic.StoreUint32(&stop, 1) + closeWg.Wait() +} + +func TestDB_ConcurrentWrite(t *testing.T) { + const n, niter = 10, 10000 + h := newDbHarness(t) + defer h.close() + + runtime.GOMAXPROCS(runtime.NumCPU()) + + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + for k := 0; k < niter; k++ { + kstr := fmt.Sprintf("%d.%d", i, k) + vstr := fmt.Sprintf("v%d", k) + h.put(kstr, vstr) + // Key should immediately available after put returns. + h.getVal(kstr, vstr) + } + }(i) + } + wg.Wait() +} + +func TestDB_CreateReopenDbOnFile(t *testing.T) { + dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile-%d", os.Getuid())) + if err := os.RemoveAll(dbpath); err != nil { + t.Fatal("cannot remove old db: ", err) + } + defer os.RemoveAll(dbpath) + + for i := 0; i < 3; i++ { + stor, err := storage.OpenFile(dbpath, false) + if err != nil { + t.Fatalf("(%d) cannot open storage: %s", i, err) + } + db, err := Open(stor, nil) + if err != nil { + t.Fatalf("(%d) cannot open db: %s", i, err) + } + if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { + t.Fatalf("(%d) cannot write to db: %s", i, err) + } + if err := db.Close(); err != nil { + t.Fatalf("(%d) cannot close db: %s", i, err) + } + if err := stor.Close(); err != nil { + t.Fatalf("(%d) cannot close storage: %s", i, err) + } + } +} + +func TestDB_CreateReopenDbOnFile2(t *testing.T) { + dbpath := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldbtestCreateReopenDbOnFile2-%d", os.Getuid())) + if err := os.RemoveAll(dbpath); err != nil { + t.Fatal("cannot remove old db: ", err) + } + defer os.RemoveAll(dbpath) + + for i := 0; i < 3; i++ { + db, err := OpenFile(dbpath, nil) + if err != nil { + t.Fatalf("(%d) cannot open db: %s", i, err) + } + if err := db.Put([]byte("foo"), []byte("bar"), nil); err != nil { + t.Fatalf("(%d) cannot write to db: %s", i, err) + } + if err := db.Close(); err != nil { + t.Fatalf("(%d) cannot close db: %s", i, err) + } + } +} + +func TestDB_DeletionMarkersOnMemdb(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("foo", "v1") + h.compactMem() + h.delete("foo") + h.get("foo", false) + h.getKeyVal("") +} + +func TestDB_LeveldbIssue178(t *testing.T) { + nKeys := (opt.DefaultCompactionTableSize / 30) * 5 + key1 := func(i int) string { + return fmt.Sprintf("my_key_%d", i) + } + key2 := func(i int) string { + return fmt.Sprintf("my_key_%d_xxx", i) + } + + // Disable compression since it affects the creation of layers and the + // code below is trying to test against a very specific scenario. + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + }) + defer h.close() + + // Create first key range. 
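+	// Sizing note: assuming the library's default 2 MiB compaction table size,
+	// nKeys works out to roughly (2 MiB / 30 B) * 5 ≈ 350k keys (the /30 is an
+	// estimate of the per-entry footprint), i.e. enough data for the manual
+	// compaction below to span several table boundaries.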
+ batch := new(Batch) + for i := 0; i < nKeys; i++ { + batch.Put([]byte(key1(i)), []byte("value for range 1 key")) + } + h.write(batch) + + // Create second key range. + batch.Reset() + for i := 0; i < nKeys; i++ { + batch.Put([]byte(key2(i)), []byte("value for range 2 key")) + } + h.write(batch) + + // Delete second key range. + batch.Reset() + for i := 0; i < nKeys; i++ { + batch.Delete([]byte(key2(i))) + } + h.write(batch) + h.waitMemCompaction() + + // Run manual compaction. + h.compactRange(key1(0), key1(nKeys-1)) + + // Checking the keys. + h.assertNumKeys(nKeys) +} + +func TestDB_LeveldbIssue200(t *testing.T) { + h := newDbHarness(t) + defer h.close() + + h.put("1", "b") + h.put("2", "c") + h.put("3", "d") + h.put("4", "e") + h.put("5", "f") + + iter := h.db.NewIterator(nil, h.ro) + + // Add an element that should not be reflected in the iterator. + h.put("25", "cd") + + iter.Seek([]byte("5")) + assertBytes(t, []byte("5"), iter.Key()) + iter.Prev() + assertBytes(t, []byte("4"), iter.Key()) + iter.Prev() + assertBytes(t, []byte("3"), iter.Key()) + iter.Next() + assertBytes(t, []byte("4"), iter.Key()) + iter.Next() + assertBytes(t, []byte("5"), iter.Key()) +} + +func TestDB_GoleveldbIssue74(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 1 * opt.MiB, + }) + defer h.close() + + const n, dur = 10000, 5 * time.Second + + runtime.GOMAXPROCS(runtime.NumCPU()) + + until := time.Now().Add(dur) + wg := new(sync.WaitGroup) + wg.Add(2) + var done uint32 + go func() { + var i int + defer func() { + t.Logf("WRITER DONE #%d", i) + atomic.StoreUint32(&done, 1) + wg.Done() + }() + + b := new(Batch) + for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { + iv := fmt.Sprintf("VAL%010d", i) + for k := 0; k < n; k++ { + key := fmt.Sprintf("KEY%06d", k) + b.Put([]byte(key), []byte(key+iv)) + b.Put([]byte(fmt.Sprintf("PTR%06d", k)), []byte(key)) + } + h.write(b) + + b.Reset() + snap := h.getSnapshot() + iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) + var k int + for ; iter.Next(); k++ { + ptrKey := iter.Key() + key := iter.Value() + + if _, err := snap.Get(ptrKey, nil); err != nil { + t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, ptrKey, err) + } + if value, err := snap.Get(key, nil); err != nil { + t.Fatalf("WRITER #%d snapshot.Get %q: %v", i, key, err) + } else if string(value) != string(key)+iv { + t.Fatalf("WRITER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+iv, value) + } + + b.Delete(key) + b.Delete(ptrKey) + } + h.write(b) + iter.Release() + snap.Release() + if k != n { + t.Fatalf("#%d %d != %d", i, k, n) + } + } + }() + go func() { + var i int + defer func() { + t.Logf("READER DONE #%d", i) + atomic.StoreUint32(&done, 1) + wg.Done() + }() + for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { + snap := h.getSnapshot() + iter := snap.NewIterator(util.BytesPrefix([]byte("PTR")), nil) + var prevValue string + var k int + for ; iter.Next(); k++ { + ptrKey := iter.Key() + key := iter.Value() + + if _, err := snap.Get(ptrKey, nil); err != nil { + t.Fatalf("READER #%d snapshot.Get %q: %v", i, ptrKey, err) + } + + if value, err := snap.Get(key, nil); err != nil { + t.Fatalf("READER #%d snapshot.Get %q: %v", i, key, err) + } else if prevValue != "" && string(value) != string(key)+prevValue { + t.Fatalf("READER #%d snapshot.Get %q got invalid value, want %q got %q", i, key, string(key)+prevValue, value) + } else { + prevValue = string(value[len(key):]) + } + } + 
	iter.Release()
+			snap.Release()
+			if k > 0 && k != n {
+				t.Fatalf("#%d %d != %d", i, k, n)
+			}
+		}
+	}()
+	wg.Wait()
+}
+
+func TestDB_GetProperties(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	_, err := h.db.GetProperty("leveldb.num-files-at-level")
+	if err == nil {
+		t.Error("GetProperty() failed to detect missing level")
+	}
+
+	_, err = h.db.GetProperty("leveldb.num-files-at-level0")
+	if err != nil {
+		t.Error("got unexpected error", err)
+	}
+
+	_, err = h.db.GetProperty("leveldb.num-files-at-level0x")
+	if err == nil {
+		t.Error("GetProperty() failed to detect invalid level")
+	}
+}
+
+func TestDB_GoleveldbIssue72and83(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		DisableLargeBatchTransaction: true,
+		WriteBuffer:                  1 * opt.MiB,
+		OpenFilesCacheCapacity:       3,
+	})
+	defer h.close()
+
+	const n, wn, dur = 10000, 100, 30 * time.Second
+
+	runtime.GOMAXPROCS(runtime.NumCPU())
+
+	randomData := func(prefix byte, i int) []byte {
+		data := make([]byte, 1+4+32+64+32)
+		_, err := crand.Reader.Read(data[1 : len(data)-8])
+		if err != nil {
+			panic(err)
+		}
+		data[0] = prefix
+		binary.LittleEndian.PutUint32(data[len(data)-8:], uint32(i))
+		binary.LittleEndian.PutUint32(data[len(data)-4:], util.NewCRC(data[:len(data)-4]).Value())
+		return data
+	}
+
+	keys := make([][]byte, n)
+	for i := range keys {
+		keys[i] = randomData(1, 0)
+	}
+
+	until := time.Now().Add(dur)
+	wg := new(sync.WaitGroup)
+	wg.Add(3)
+	var done uint32
+	go func() {
+		i := 0
+		defer func() {
+			t.Logf("WRITER DONE #%d", i)
+			wg.Done()
+		}()
+
+		b := new(Batch)
+		for ; i < wn && atomic.LoadUint32(&done) == 0; i++ {
+			b.Reset()
+			for _, k1 := range keys {
+				k2 := randomData(2, i)
+				b.Put(k2, randomData(42, i))
+				b.Put(k1, k2)
+			}
+			if err := h.db.Write(b, h.wo); err != nil {
+				atomic.StoreUint32(&done, 1)
+				t.Fatalf("WRITER #%d db.Write: %v", i, err)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER0 DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+		}()
+		for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ {
+			snap := h.getSnapshot()
+			seq := snap.elem.seq
+			if seq == 0 {
+				snap.Release()
+				continue
+			}
+			iter := snap.NewIterator(util.BytesPrefix([]byte{1}), nil)
+			writei := int(seq/(n*2) - 1)
+			var k int
+			for ; iter.Next(); k++ {
+				k1 := iter.Key()
+				k2 := iter.Value()
+				k1checksum0 := binary.LittleEndian.Uint32(k1[len(k1)-4:])
+				k1checksum1 := util.NewCRC(k1[:len(k1)-4]).Value()
+				if k1checksum0 != k1checksum1 {
+					t.Fatalf("READER0 #%d.%d W#%d invalid K1 checksum: %#x != %#x", i, k, writei, k1checksum0, k1checksum1)
+				}
+				k2checksum0 := binary.LittleEndian.Uint32(k2[len(k2)-4:])
+				k2checksum1 := util.NewCRC(k2[:len(k2)-4]).Value()
+				if k2checksum0 != k2checksum1 {
+					t.Fatalf("READER0 #%d.%d W#%d invalid K2 checksum: %#x != %#x", i, k, writei, k2checksum0, k2checksum1)
+				}
+				kwritei := int(binary.LittleEndian.Uint32(k2[len(k2)-8:]))
+				if writei != kwritei {
+					t.Fatalf("READER0 #%d.%d W#%d invalid write iteration num: %d", i, k, writei, kwritei)
+				}
+				if _, err := snap.Get(k2, nil); err != nil {
+					t.Fatalf("READER0 #%d.%d W#%d snap.Get: %v\nk1: %x\n -> k2: %x", i, k, writei, err, k1, k2)
+				}
+			}
+			if err := iter.Error(); err != nil {
+				t.Fatalf("READER0 #%d.%d W#%d snap.Iterator: %v", i, k, writei, err)
+			}
+			iter.Release()
+			snap.Release()
+			if k > 0 && k != n {
+				t.Fatalf("READER0 #%d W#%d short read, got=%d want=%d", i, writei, k, n)
+			}
+		}
+	}()
+	go func() {
+		var i int
+		defer func() {
+			t.Logf("READER1 DONE #%d", i)
+			atomic.StoreUint32(&done, 1)
+			wg.Done()
+
}() + for ; time.Now().Before(until) && atomic.LoadUint32(&done) == 0; i++ { + iter := h.db.NewIterator(nil, nil) + seq := iter.(*dbIter).seq + if seq == 0 { + iter.Release() + continue + } + writei := int(seq/(n*2) - 1) + var k int + for ok := iter.Last(); ok; ok = iter.Prev() { + k++ + } + if err := iter.Error(); err != nil { + t.Fatalf("READER1 #%d.%d W#%d db.Iterator: %v", i, k, writei, err) + } + iter.Release() + if m := (writei+1)*n + n; k != m { + t.Fatalf("READER1 #%d W#%d short read, got=%d want=%d", i, writei, k, m) + } + } + }() + + wg.Wait() +} + +func TestDB_TransientError(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 128 * opt.KiB, + OpenFilesCacheCapacity: 3, + DisableCompactionBackoff: true, + }) + defer h.close() + + const ( + nSnap = 20 + nKey = 10000 + ) + + var ( + snaps [nSnap]*Snapshot + b = &Batch{} + ) + for i := range snaps { + vtail := fmt.Sprintf("VAL%030d", i) + b.Reset() + for k := 0; k < nKey; k++ { + key := fmt.Sprintf("KEY%8d", k) + b.Put([]byte(key), []byte(key+vtail)) + } + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, errors.New("table transient read error")) + if err := h.db.Write(b, nil); err != nil { + t.Logf("WRITE #%d error: %v", i, err) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) + for { + if err := h.db.Write(b, nil); err == nil { + break + } else if errors.IsCorrupted(err) { + t.Fatalf("WRITE #%d corrupted: %v", i, err) + } + } + } + + snaps[i] = h.db.newSnapshot() + b.Reset() + for k := 0; k < nKey; k++ { + key := fmt.Sprintf("KEY%8d", k) + b.Delete([]byte(key)) + } + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, errors.New("table transient read error")) + if err := h.db.Write(b, nil); err != nil { + t.Logf("WRITE #%d error: %v", i, err) + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) + for { + if err := h.db.Write(b, nil); err == nil { + break + } else if errors.IsCorrupted(err) { + t.Fatalf("WRITE #%d corrupted: %v", i, err) + } + } + } + } + h.stor.EmulateError(testutil.ModeOpen|testutil.ModeRead, storage.TypeTable, nil) + + runtime.GOMAXPROCS(runtime.NumCPU()) + + rnd := rand.New(rand.NewSource(0xecafdaed)) + wg := &sync.WaitGroup{} + for i, snap := range snaps { + wg.Add(2) + + go func(i int, snap *Snapshot, sk []int) { + defer wg.Done() + + vtail := fmt.Sprintf("VAL%030d", i) + for _, k := range sk { + key := fmt.Sprintf("KEY%8d", k) + xvalue, err := snap.Get([]byte(key), nil) + if err != nil { + t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) + } + value := key + vtail + if !bytes.Equal([]byte(value), xvalue) { + t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) + } + } + }(i, snap, rnd.Perm(nKey)) + + go func(i int, snap *Snapshot) { + defer wg.Done() + + vtail := fmt.Sprintf("VAL%030d", i) + iter := snap.NewIterator(nil, nil) + defer iter.Release() + for k := 0; k < nKey; k++ { + if !iter.Next() { + if err := iter.Error(); err != nil { + t.Fatalf("READER_ITER #%d K%d error: %v", i, k, err) + } else { + t.Fatalf("READER_ITER #%d K%d eoi", i, k) + } + } + key := fmt.Sprintf("KEY%8d", k) + xkey := iter.Key() + if !bytes.Equal([]byte(key), xkey) { + t.Fatalf("READER_ITER #%d K%d invalid key: want %q, got %q", i, k, key, xkey) + } + value := key + vtail + xvalue := iter.Value() + if !bytes.Equal([]byte(value), xvalue) { + t.Fatalf("READER_ITER #%d K%d invalid value: 
want %q, got %q", i, k, value, xvalue) + } + } + }(i, snap) + } + + wg.Wait() +} + +func TestDB_UkeyShouldntHopAcrossTable(t *testing.T) { + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 112 * opt.KiB, + CompactionTableSize: 90 * opt.KiB, + CompactionExpandLimitFactor: 1, + }) + defer h.close() + + const ( + nSnap = 190 + nKey = 140 + ) + + var ( + snaps [nSnap]*Snapshot + b = &Batch{} + ) + for i := range snaps { + vtail := fmt.Sprintf("VAL%030d", i) + b.Reset() + for k := 0; k < nKey; k++ { + key := fmt.Sprintf("KEY%08d", k) + b.Put([]byte(key), []byte(key+vtail)) + } + if err := h.db.Write(b, nil); err != nil { + t.Fatalf("WRITE #%d error: %v", i, err) + } + + snaps[i] = h.db.newSnapshot() + b.Reset() + for k := 0; k < nKey; k++ { + key := fmt.Sprintf("KEY%08d", k) + b.Delete([]byte(key)) + } + if err := h.db.Write(b, nil); err != nil { + t.Fatalf("WRITE #%d error: %v", i, err) + } + } + + h.compactMem() + + h.waitCompaction() + for level, tables := range h.db.s.stVersion.levels { + for _, table := range tables { + t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) + } + } + + h.compactRangeAt(0, "", "") + h.waitCompaction() + for level, tables := range h.db.s.stVersion.levels { + for _, table := range tables { + t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) + } + } + h.compactRangeAt(1, "", "") + h.waitCompaction() + for level, tables := range h.db.s.stVersion.levels { + for _, table := range tables { + t.Logf("L%d@%d %q:%q", level, table.fd.Num, table.imin, table.imax) + } + } + runtime.GOMAXPROCS(runtime.NumCPU()) + + wg := &sync.WaitGroup{} + for i, snap := range snaps { + wg.Add(1) + + go func(i int, snap *Snapshot) { + defer wg.Done() + + vtail := fmt.Sprintf("VAL%030d", i) + for k := 0; k < nKey; k++ { + key := fmt.Sprintf("KEY%08d", k) + xvalue, err := snap.Get([]byte(key), nil) + if err != nil { + t.Fatalf("READER_GET #%d SEQ=%d K%d error: %v", i, snap.elem.seq, k, err) + } + value := key + vtail + if !bytes.Equal([]byte(value), xvalue) { + t.Fatalf("READER_GET #%d SEQ=%d K%d invalid value: want %q, got %q", i, snap.elem.seq, k, value, xvalue) + } + } + }(i, snap) + } + + wg.Wait() +} + +func TestDB_TableCompactionBuilder(t *testing.T) { + gomega.RegisterTestingT(t) + stor := testutil.NewStorage() + stor.OnLog(testingLogger(t)) + stor.OnClose(testingPreserveOnFailed(t)) + defer stor.Close() + + const nSeq = 99 + + o := &opt.Options{ + DisableLargeBatchTransaction: true, + WriteBuffer: 112 * opt.KiB, + CompactionTableSize: 43 * opt.KiB, + CompactionExpandLimitFactor: 1, + CompactionGPOverlapsFactor: 1, + DisableBlockCache: true, + } + s, err := newSession(stor, o) + if err != nil { + t.Fatal(err) + } + if err := s.create(); err != nil { + t.Fatal(err) + } + defer s.close() + var ( + seq uint64 + targetSize = 5 * o.CompactionTableSize + value = bytes.Repeat([]byte{'0'}, 100) + ) + for i := 0; i < 2; i++ { + tw, err := s.tops.create() + if err != nil { + t.Fatal(err) + } + for k := 0; tw.tw.BytesLen() < targetSize; k++ { + key := []byte(fmt.Sprintf("%09d", k)) + seq += nSeq - 1 + for x := uint64(0); x < nSeq; x++ { + if err := tw.append(makeInternalKey(nil, key, seq-x, keyTypeVal), value); err != nil { + t.Fatal(err) + } + } + } + tf, err := tw.finish() + if err != nil { + t.Fatal(err) + } + rec := &sessionRecord{} + rec.addTableFile(i, tf) + if err := s.commit(rec); err != nil { + t.Fatal(err) + } + } + + // Build grandparent. 
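+	// The two tables committed above sit at levels 0 and 1. Compacting level-1
+	// first (below) materialises a level-2 "grandparent" generation; the
+	// deliberately odd tableSize (CompactionTableSize/3 + 961) forces the
+	// builder to cut many small output tables so the no-ukey-hop invariant can
+	// actually be exercised.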
+ v := s.version() + c := newCompaction(s, v, 1, append(tFiles{}, v.levels[1]...)) + rec := &sessionRecord{} + b := &tableCompactionBuilder{ + s: s, + c: c, + rec: rec, + stat1: new(cStatStaging), + minSeq: 0, + strict: true, + tableSize: o.CompactionTableSize/3 + 961, + } + if err := b.run(new(compactionTransactCounter)); err != nil { + t.Fatal(err) + } + for _, t := range c.levels[0] { + rec.delTable(c.sourceLevel, t.fd.Num) + } + if err := s.commit(rec); err != nil { + t.Fatal(err) + } + c.release() + + // Build level-1. + v = s.version() + c = newCompaction(s, v, 0, append(tFiles{}, v.levels[0]...)) + rec = &sessionRecord{} + b = &tableCompactionBuilder{ + s: s, + c: c, + rec: rec, + stat1: new(cStatStaging), + minSeq: 0, + strict: true, + tableSize: o.CompactionTableSize, + } + if err := b.run(new(compactionTransactCounter)); err != nil { + t.Fatal(err) + } + for _, t := range c.levels[0] { + rec.delTable(c.sourceLevel, t.fd.Num) + } + // Move grandparent to level-3 + for _, t := range v.levels[2] { + rec.delTable(2, t.fd.Num) + rec.addTableFile(3, t) + } + if err := s.commit(rec); err != nil { + t.Fatal(err) + } + c.release() + + v = s.version() + for level, want := range []bool{false, true, false, true} { + got := len(v.levels[level]) > 0 + if want != got { + t.Fatalf("invalid level-%d tables len: want %v, got %v", level, want, got) + } + } + for i, f := range v.levels[1][:len(v.levels[1])-1] { + nf := v.levels[1][i+1] + if bytes.Equal(f.imax.ukey(), nf.imin.ukey()) { + t.Fatalf("KEY %q hop across table %d .. %d", f.imax.ukey(), f.fd.Num, nf.fd.Num) + } + } + v.release() + + // Compaction with transient error. + v = s.version() + c = newCompaction(s, v, 1, append(tFiles{}, v.levels[1]...)) + rec = &sessionRecord{} + b = &tableCompactionBuilder{ + s: s, + c: c, + rec: rec, + stat1: new(cStatStaging), + minSeq: 0, + strict: true, + tableSize: o.CompactionTableSize, + } + stor.EmulateErrorOnce(testutil.ModeSync, storage.TypeTable, errors.New("table sync error (once)")) + stor.EmulateRandomError(testutil.ModeRead|testutil.ModeWrite, storage.TypeTable, 0.01, errors.New("table random IO error")) + for { + if err := b.run(new(compactionTransactCounter)); err != nil { + t.Logf("(expected) b.run: %v", err) + } else { + break + } + } + if err := s.commit(rec); err != nil { + t.Fatal(err) + } + c.release() + + stor.EmulateErrorOnce(testutil.ModeSync, storage.TypeTable, nil) + stor.EmulateRandomError(testutil.ModeRead|testutil.ModeWrite, storage.TypeTable, 0, nil) + + v = s.version() + if len(v.levels[1]) != len(v.levels[2]) { + t.Fatalf("invalid tables length, want %d, got %d", len(v.levels[1]), len(v.levels[2])) + } + for i, f0 := range v.levels[1] { + f1 := v.levels[2][i] + iter0 := s.tops.newIterator(f0, nil, nil) + iter1 := s.tops.newIterator(f1, nil, nil) + for j := 0; true; j++ { + next0 := iter0.Next() + next1 := iter1.Next() + if next0 != next1 { + t.Fatalf("#%d.%d invalid eoi: want %v, got %v", i, j, next0, next1) + } + key0 := iter0.Key() + key1 := iter1.Key() + if !bytes.Equal(key0, key1) { + t.Fatalf("#%d.%d invalid key: want %q, got %q", i, j, key0, key1) + } + if next0 == false { + break + } + } + iter0.Release() + iter1.Release() + } + v.release() +} + +func testDB_IterTriggeredCompaction(t *testing.T, limitDiv int) { + const ( + vSize = 200 * opt.KiB + tSize = 100 * opt.MiB + mIter = 100 + n = tSize / vSize + ) + + h := newDbHarnessWopt(t, &opt.Options{ + DisableLargeBatchTransaction: true, + Compression: opt.NoCompression, + DisableBlockCache: true, + }) + defer h.close() + + 
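+	// This test relies on read-triggered compaction: iterating over a range
+	// that consists almost entirely of deletion markers makes the iterator
+	// sample those reads and schedule a compaction, so repeated scans should
+	// eventually shrink the range to a fraction of its initial size.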
+	h.db.memdbMaxLevel = 2
+
+	key := func(x int) string {
+		return fmt.Sprintf("v%06d", x)
+	}
+
+	// Fill.
+	value := strings.Repeat("x", vSize)
+	for i := 0; i < n; i++ {
+		h.put(key(i), value)
+	}
+	h.compactMem()
+
+	// Delete all.
+	for i := 0; i < n; i++ {
+		h.delete(key(i))
+	}
+	h.compactMem()
+
+	var (
+		limit = n / limitDiv
+
+		startKey = key(0)
+		limitKey = key(limit)
+		maxKey   = key(n)
+		slice    = &util.Range{Limit: []byte(limitKey)}
+
+		initialSize0 = h.sizeOf(startKey, limitKey)
+		initialSize1 = h.sizeOf(limitKey, maxKey)
+	)
+
+	t.Logf("initial size %s [rest %s]", shortenb(int(initialSize0)), shortenb(int(initialSize1)))
+
+	for r := 0; true; r++ {
+		if r >= mIter {
+			t.Fatal("taking too long to compact")
+		}
+
+		// Iterate.
+		iter := h.db.NewIterator(slice, h.ro)
+		for iter.Next() {
+		}
+		if err := iter.Error(); err != nil {
+			t.Fatalf("Iter err: %v", err)
+		}
+		iter.Release()
+
+		// Wait for compaction.
+		h.waitCompaction()
+
+		// Check size.
+		size0 := h.sizeOf(startKey, limitKey)
+		size1 := h.sizeOf(limitKey, maxKey)
+		t.Logf("#%03d size %s [rest %s]", r, shortenb(int(size0)), shortenb(int(size1)))
+		if size0 < initialSize0/10 {
+			break
+		}
+	}
+
+	if initialSize1 > 0 {
+		h.sizeAssert(limitKey, maxKey, initialSize1/4-opt.MiB, initialSize1+opt.MiB)
+	}
+}
+
+func TestDB_IterTriggeredCompaction(t *testing.T) {
+	testDB_IterTriggeredCompaction(t, 1)
+}
+
+func TestDB_IterTriggeredCompactionHalf(t *testing.T) {
+	testDB_IterTriggeredCompaction(t, 2)
+}
+
+func TestDB_ReadOnly(t *testing.T) {
+	h := newDbHarness(t)
+	defer h.close()
+
+	h.put("foo", "v1")
+	h.put("bar", "v2")
+	h.compactMem()
+
+	h.put("xfoo", "v1")
+	h.put("xbar", "v2")
+
+	t.Log("Trigger read-only")
+	if err := h.db.SetReadOnly(); err != nil {
+		h.close()
+		t.Fatalf("SetReadOnly error: %v", err)
+	}
+
+	mode := testutil.ModeCreate | testutil.ModeRemove | testutil.ModeRename | testutil.ModeWrite | testutil.ModeSync
+	h.stor.EmulateError(mode, storage.TypeAll, errors.New("read-only DB shouldn't write"))
+
+	ro := func(key, value, wantValue string) {
+		if err := h.db.Put([]byte(key), []byte(value), h.wo); err != ErrReadOnly {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		h.getVal(key, wantValue)
+	}
+
+	ro("foo", "vx", "v1")
+
+	h.o.ReadOnly = true
+	h.reopenDB()
+
+	ro("foo", "vx", "v1")
+	ro("bar", "vx", "v2")
+	h.assertNumKeys(4)
+}
+
+func TestDB_BulkInsertDelete(t *testing.T) {
+	h := newDbHarnessWopt(t, &opt.Options{
+		DisableLargeBatchTransaction: true,
+		Compression:                  opt.NoCompression,
+		CompactionTableSize:          128 * opt.KiB,
+		CompactionTotalSize:          1 * opt.MiB,
+		WriteBuffer:                  256 * opt.KiB,
+	})
+	defer h.close()
+
+	const R = 100
+	const N = 2500
+	key := make([]byte, 4)
+	value := make([]byte, 256)
+	for i := 0; i < R; i++ {
+		offset := N * i
+		for j := 0; j < N; j++ {
+			binary.BigEndian.PutUint32(key, uint32(offset+j))
+			h.db.Put(key, value, nil)
+		}
+		for j := 0; j < N; j++ {
+			binary.BigEndian.PutUint32(key, uint32(offset+j))
+			h.db.Delete(key, nil)
+		}
+	}
+
+	if tot := h.totalTables(); tot > 10 {
+		t.Fatalf("too many uncompacted tables: %d (%s)", tot, h.getTablesPerLevel())
+	}
+}
+
+func TestDB_GracefulClose(t *testing.T) {
+	runtime.GOMAXPROCS(4)
+	h := newDbHarnessWopt(t, &opt.Options{
+		DisableLargeBatchTransaction: true,
+		Compression:                  opt.NoCompression,
+		CompactionTableSize:          1 * opt.MiB,
+		WriteBuffer:                  1 * opt.MiB,
+	})
+	defer h.close()
+
+	var closeWait sync.WaitGroup
+
+	// During write.
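+	// (The close below may land between any two Puts; "graceful" means every
+	// outcome is a clean error from Put/Get/iterate, never a panic or a hung
+	// goroutine.)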
+ n := 0 + closing := false + for i := 0; i < 1000000; i++ { + if !closing && h.totalTables() > 3 { + t.Logf("close db during write, index=%d", i) + closeWait.Add(1) + go func() { + h.closeDB() + closeWait.Done() + }() + closing = true + } + if err := h.db.Put([]byte(fmt.Sprintf("%09d", i)), []byte(fmt.Sprintf("VAL-%09d", i)), h.wo); err != nil { + t.Logf("Put error: %s (expected)", err) + n = i + break + } + } + closeWait.Wait() + + // During read. + h.openDB() + closing = false + for i := 0; i < n; i++ { + if !closing && i > n/2 { + t.Logf("close db during read, index=%d", i) + closeWait.Add(1) + go func() { + h.closeDB() + closeWait.Done() + }() + closing = true + } + if _, err := h.db.Get([]byte(fmt.Sprintf("%09d", i)), h.ro); err != nil { + t.Logf("Get error: %s (expected)", err) + break + } + } + closeWait.Wait() + + // During iterate. + h.openDB() + closing = false + iter := h.db.NewIterator(nil, h.ro) + for i := 0; iter.Next(); i++ { + if len(iter.Key()) == 0 || len(iter.Value()) == 0 { + t.Error("Key or value has zero length") + } + if !closing { + t.Logf("close db during iter, index=%d", i) + closeWait.Add(1) + go func() { + h.closeDB() + closeWait.Done() + }() + closing = true + } + time.Sleep(time.Millisecond) + } + if err := iter.Error(); err != nil { + t.Logf("Iter error: %s (expected)", err) + } + iter.Release() + closeWait.Wait() +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/external_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/external_test.go new file mode 100644 index 0000000000..4bc3a91044 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/external_test.go @@ -0,0 +1,117 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/testutil" +) + +var _ = testutil.Defer(func() { + Describe("Leveldb external", func() { + o := &opt.Options{ + DisableBlockCache: true, + BlockRestartInterval: 5, + BlockSize: 80, + Compression: opt.NoCompression, + OpenFilesCacheCapacity: -1, + Strict: opt.StrictAll, + WriteBuffer: 1000, + CompactionTableSize: 2000, + } + + Describe("write test", func() { + It("should do write correctly", func(done Done) { + db := newTestingDB(o, nil, nil) + t := testutil.DBTesting{ + DB: db, + Deleted: testutil.KeyValue_Generate(nil, 500, 1, 50, 5, 5).Clone(), + } + testutil.DoDBTesting(&t) + db.TestClose() + done <- true + }, 20.0) + }) + + Describe("read test", func() { + testutil.AllKeyValueTesting(nil, nil, func(kv testutil.KeyValue) testutil.DB { + // Building the DB. 
+ db := newTestingDB(o, nil, nil) + kv.IterateShuffled(nil, func(i int, key, value []byte) { + err := db.TestPut(key, value) + Expect(err).NotTo(HaveOccurred()) + }) + + return db + }, func(db testutil.DB) { + db.(*testingDB).TestClose() + }) + }) + + Describe("transaction test", func() { + It("should do transaction correctly", func(done Done) { + db := newTestingDB(o, nil, nil) + + By("creating first transaction") + var err error + tr := &testingTransaction{} + tr.Transaction, err = db.OpenTransaction() + Expect(err).NotTo(HaveOccurred()) + t0 := &testutil.DBTesting{ + DB: tr, + Deleted: testutil.KeyValue_Generate(nil, 200, 1, 50, 5, 5).Clone(), + } + testutil.DoDBTesting(t0) + testutil.TestGet(tr, t0.Present) + testutil.TestHas(tr, t0.Present) + + By("committing first transaction") + err = tr.Commit() + Expect(err).NotTo(HaveOccurred()) + testutil.TestIter(db, nil, t0.Present) + testutil.TestGet(db, t0.Present) + testutil.TestHas(db, t0.Present) + + By("manipulating DB without transaction") + t0.DB = db + testutil.DoDBTesting(t0) + + By("creating second transaction") + tr.Transaction, err = db.OpenTransaction() + Expect(err).NotTo(HaveOccurred()) + t1 := &testutil.DBTesting{ + DB: tr, + Deleted: t0.Deleted.Clone(), + Present: t0.Present.Clone(), + } + testutil.DoDBTesting(t1) + testutil.TestIter(db, nil, t0.Present) + + By("discarding second transaction") + tr.Discard() + testutil.TestIter(db, nil, t0.Present) + + By("creating third transaction") + tr.Transaction, err = db.OpenTransaction() + Expect(err).NotTo(HaveOccurred()) + t0.DB = tr + testutil.DoDBTesting(t0) + + By("committing third transaction") + err = tr.Commit() + Expect(err).NotTo(HaveOccurred()) + testutil.TestIter(db, nil, t0.Present) + + db.TestClose() + done <- true + }, 30.0) + }) + }) +}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go new file mode 100644 index 0000000000..1fb56f0713 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom_test.go @@ -0,0 +1,142 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
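+//
+// The assertions below lean on two properties of the 10-bits-per-key filter:
+// its serialised size is roughly n*10/8 bytes plus a small constant (a
+// trailing byte recording the probe count, and a 64-bit minimum bitmap), and
+// its expected false-positive rate is on the order of 1%.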
+
+package filter
+
+import (
+	"encoding/binary"
+	"github.com/syndtr/goleveldb/leveldb/util"
+	"testing"
+)
+
+type harness struct {
+	t *testing.T
+
+	bloom     Filter
+	generator FilterGenerator
+	filter    []byte
+}
+
+func newHarness(t *testing.T) *harness {
+	bloom := NewBloomFilter(10)
+	return &harness{
+		t:         t,
+		bloom:     bloom,
+		generator: bloom.NewGenerator(),
+	}
+}
+
+func (h *harness) add(key []byte) {
+	h.generator.Add(key)
+}
+
+func (h *harness) addNum(key uint32) {
+	var b [4]byte
+	binary.LittleEndian.PutUint32(b[:], key)
+	h.add(b[:])
+}
+
+func (h *harness) build() {
+	b := &util.Buffer{}
+	h.generator.Generate(b)
+	h.filter = b.Bytes()
+}
+
+func (h *harness) reset() {
+	h.filter = nil
+}
+
+func (h *harness) filterLen() int {
+	return len(h.filter)
+}
+
+func (h *harness) assert(key []byte, want, silent bool) bool {
+	got := h.bloom.Contains(h.filter, key)
+	if !silent && got != want {
+		h.t.Errorf("assert on '%v' failed: got '%v', want '%v'", key, got, want)
+	}
+	return got
+}
+
+func (h *harness) assertNum(key uint32, want, silent bool) bool {
+	var b [4]byte
+	binary.LittleEndian.PutUint32(b[:], key)
+	return h.assert(b[:], want, silent)
+}
+
+func TestBloomFilter_Empty(t *testing.T) {
+	h := newHarness(t)
+	h.build()
+	h.assert([]byte("hello"), false, false)
+	h.assert([]byte("world"), false, false)
+}
+
+func TestBloomFilter_Small(t *testing.T) {
+	h := newHarness(t)
+	h.add([]byte("hello"))
+	h.add([]byte("world"))
+	h.build()
+	h.assert([]byte("hello"), true, false)
+	h.assert([]byte("world"), true, false)
+	h.assert([]byte("x"), false, false)
+	h.assert([]byte("foo"), false, false)
+}
+
+func nextN(n int) int {
+	switch {
+	case n < 10:
+		n += 1
+	case n < 100:
+		n += 10
+	case n < 1000:
+		n += 100
+	default:
+		n += 1000
+	}
+	return n
+}
+
+func TestBloomFilter_VaryingLengths(t *testing.T) {
+	h := newHarness(t)
+	var mediocre, good int
+	for n := 1; n < 10000; n = nextN(n) {
+		h.reset()
+		for i := 0; i < n; i++ {
+			h.addNum(uint32(i))
+		}
+		h.build()
+
+		got := h.filterLen()
+		want := (n * 10 / 8) + 40
+		if got > want {
+			t.Errorf("filter len test failed, '%d' > '%d'", got, want)
+		}
+
+		for i := 0; i < n; i++ {
+			h.assertNum(uint32(i), true, false)
+		}
+
+		var rate float32
+		for i := 0; i < 10000; i++ {
+			if h.assertNum(uint32(i+1000000000), true, true) {
+				rate++
+			}
+		}
+		rate /= 10000
+		if rate > 0.02 {
+			t.Errorf("false positive rate is more than 2%%, got %v, at len %d", rate, n)
+		}
+		if rate > 0.0125 {
+			mediocre++
+		} else {
+			good++
+		}
+	}
+	t.Logf("false positive rate: %d good, %d mediocre", good, mediocre)
+	if mediocre > good/5 {
+		t.Error("mediocre false positive rate is more than expected")
+	}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go
new file mode 100644
index 0000000000..1ed6d07cbb
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter_test.go
@@ -0,0 +1,30 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator_test
+
+import (
+	. "github.com/onsi/ginkgo"
+
+	. "github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+var _ = testutil.Defer(func() {
+	Describe("Array iterator", func() {
+		It("Should iterate and seek correctly", func() {
+			// Build key/value.
+			kv := testutil.KeyValue_Generate(nil, 70, 1, 5, 3, 3)
+
+			// Test the iterator.
+			t := testutil.IteratorTesting{
+				KeyValue: kv.Clone(),
+				Iter:     NewArrayIterator(kv),
+			}
+			testutil.DoIteratorTesting(&t)
+		})
+	})
+})
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
new file mode 100644
index 0000000000..72a7978924
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter_test.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator_test
+
+import (
+	"sort"
+
+	. "github.com/onsi/ginkgo"
+
+	"github.com/syndtr/goleveldb/leveldb/comparer"
+	. "github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+type keyValue struct {
+	key []byte
+	testutil.KeyValue
+}
+
+type keyValueIndex []keyValue
+
+func (x keyValueIndex) Search(key []byte) int {
+	return sort.Search(x.Len(), func(i int) bool {
+		return comparer.DefaultComparer.Compare(x[i].key, key) >= 0
+	})
+}
+
+func (x keyValueIndex) Len() int                        { return len(x) }
+func (x keyValueIndex) Index(i int) (key, value []byte) { return x[i].key, nil }
+func (x keyValueIndex) Get(i int) Iterator              { return NewArrayIterator(x[i]) }
+
+var _ = testutil.Defer(func() {
+	Describe("Indexed iterator", func() {
+		Test := func(n ...int) func() {
+			if len(n) == 0 {
+				rnd := testutil.NewRand()
+				n = make([]int, rnd.Intn(17)+3)
+				for i := range n {
+					n[i] = rnd.Intn(19) + 1
+				}
+			}
+
+			return func() {
+				It("Should iterate and seek correctly", func(done Done) {
+					// Build key/value.
+					index := make(keyValueIndex, len(n))
+					sum := 0
+					for _, x := range n {
+						sum += x
+					}
+					kv := testutil.KeyValue_Generate(nil, sum, 1, 10, 4, 4)
+					for i, j := 0, 0; i < len(n); i++ {
+						for x := n[i]; x > 0; x-- {
+							key, value := kv.Index(j)
+							index[i].key = key
+							index[i].Put(key, value)
+							j++
+						}
+					}
+
+					// Test the iterator.
+					t := testutil.IteratorTesting{
+						KeyValue: kv.Clone(),
+						Iter:     NewIndexedIterator(NewArrayIndexer(index), true),
+					}
+					testutil.DoIteratorTesting(&t)
+					done <- true
+				}, 1.5)
+			}
+		}
+
+		Describe("with 100 keys", Test(100))
+		Describe("with 50-50 keys", Test(50, 50))
+		Describe("with 50-1 keys", Test(50, 1))
+		Describe("with 50-1-50 keys", Test(50, 1, 50))
+		Describe("with 1-50 keys", Test(1, 50))
+		Describe("with random N-keys", Test())
+	})
+})
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
new file mode 100644
index 0000000000..5ef8d5bafb
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter_suite_test.go
@@ -0,0 +1,11 @@
+package iterator_test
+
+import (
+	"testing"
+
+	"github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+func TestIterator(t *testing.T) {
+	testutil.RunSuite(t, "Iterator Suite")
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
new file mode 100644
index 0000000000..e523b63e4b
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter_test.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+
+	"github.com/syndtr/goleveldb/leveldb/comparer"
+	. "github.com/syndtr/goleveldb/leveldb/iterator"
+	"github.com/syndtr/goleveldb/leveldb/testutil"
+)
+
+var _ = testutil.Defer(func() {
+	Describe("Merged iterator", func() {
+		Test := func(filled int, empty int) func() {
+			return func() {
+				It("Should iterate and seek correctly", func(done Done) {
+					rnd := testutil.NewRand()
+
+					// Build key/value.
+					filledKV := make([]testutil.KeyValue, filled)
+					kv := testutil.KeyValue_Generate(nil, 100, 1, 10, 4, 4)
+					kv.Iterate(func(i int, key, value []byte) {
+						filledKV[rnd.Intn(filled)].Put(key, value)
+					})
+
+					// Create iterators.
+					iters := make([]Iterator, filled+empty)
+					for i := range iters {
+						if empty == 0 || (rnd.Int()%2 == 0 && filled > 0) {
+							filled--
+							Expect(filledKV[filled].Len()).ShouldNot(BeZero())
+							iters[i] = NewArrayIterator(filledKV[filled])
+						} else {
+							empty--
+							iters[i] = NewEmptyIterator(nil)
+						}
+					}
+
+					// Test the iterator.
+					t := testutil.IteratorTesting{
+						KeyValue: kv.Clone(),
+						Iter:     NewMergedIterator(iters, comparer.DefaultComparer, true),
+					}
+					testutil.DoIteratorTesting(&t)
+					done <- true
+				}, 1.5)
+			}
+		}
+
+		Describe("with three, all filled iterators", Test(3, 0))
+		Describe("with one filled, one empty iterators", Test(1, 1))
+		Describe("with one filled, two empty iterators", Test(1, 2))
+	})
+})
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
new file mode 100644
index 0000000000..0fcf22599f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal_test.go
@@ -0,0 +1,818 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record_test.go?r=df1fa28f7f3be6c3935548169002309c12967135
+// License, author, and contributor information can be found at the URLs below, respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math/rand"
+	"strings"
+	"testing"
+)
+
+type dropper struct {
+	t *testing.T
+}
+
+func (d dropper) Drop(err error) {
+	d.t.Log(err)
+}
+
+func short(s string) string {
+	if len(s) < 64 {
+		return s
+	}
+	return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:])
+}
+
+// big returns a string of length n, composed of repetitions of partial.
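+// For example, big("abc", 5) == "abcab".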
+func big(partial string, n int) string { + return strings.Repeat(partial, n/len(partial)+1)[:n] +} + +func TestEmpty(t *testing.T) { + buf := new(bytes.Buffer) + r := NewReader(buf, dropper{t}, true, true) + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { + buf := new(bytes.Buffer) + + reset() + w := NewWriter(buf) + for { + s, ok := gen() + if !ok { + break + } + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write([]byte(s)); err != nil { + t.Fatal(err) + } + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + reset() + r := NewReader(buf, dropper{t}, true, true) + for { + s, ok := gen() + if !ok { + break + } + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + x, err := ioutil.ReadAll(rr) + if err != nil { + t.Fatal(err) + } + if string(x) != s { + t.Fatalf("got %q, want %q", short(string(x)), short(s)) + } + } + if _, err := r.Next(); err != io.EOF { + t.Fatalf("got %v, want %v", err, io.EOF) + } +} + +func testLiterals(t *testing.T, s []string) { + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == len(s) { + return "", false + } + i++ + return s[i-1], true + } + testGenerator(t, reset, gen) +} + +func TestMany(t *testing.T) { + const n = 1e5 + var i int + reset := func() { + i = 0 + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return fmt.Sprintf("%d.", i-1), true + } + testGenerator(t, reset, gen) +} + +func TestRandom(t *testing.T) { + const n = 1e2 + var ( + i int + r *rand.Rand + ) + reset := func() { + i, r = 0, rand.New(rand.NewSource(0)) + } + gen := func() (string, bool) { + if i == n { + return "", false + } + i++ + return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true + } + testGenerator(t, reset, gen) +} + +func TestBasic(t *testing.T) { + testLiterals(t, []string{ + strings.Repeat("a", 1000), + strings.Repeat("b", 97270), + strings.Repeat("c", 8000), + }) +} + +func TestBoundary(t *testing.T) { + for i := blockSize - 16; i < blockSize+16; i++ { + s0 := big("abcd", i) + for j := blockSize - 16; j < blockSize+16; j++ { + s1 := big("ABCDE", j) + testLiterals(t, []string{s0, s1}) + testLiterals(t, []string{s0, "", s1}) + testLiterals(t, []string{s0, "x", s1}) + } + } +} + +func TestFlush(t *testing.T) { + buf := new(bytes.Buffer) + w := NewWriter(buf) + // Write a couple of records. Everything should still be held + // in the record.Writer buffer, so that buf.Len should be 0. + w0, _ := w.Next() + w0.Write([]byte("0")) + w1, _ := w.Next() + w1.Write([]byte("11")) + if got, want := buf.Len(), 0; got != want { + t.Fatalf("buffer length #0: got %d want %d", got, want) + } + // Flush the record.Writer buffer, which should yield 17 bytes. + // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #1: got %d want %d", got, want) + } + // Do another write, one that isn't large enough to complete the block. + // The write should not have flowed through to buf. + w2, _ := w.Next() + w2.Write(bytes.Repeat([]byte("2"), 10000)) + if got, want := buf.Len(), 17; got != want { + t.Fatalf("buffer length #2: got %d want %d", got, want) + } + // Flushing should get us up to 10024 bytes written. + // 10024 = 17 + 7 + 10000. 
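+	// (Each chunk header is headerSize = 7 bytes: a 4-byte checksum, a 2-byte
+	// little-endian length and a 1-byte chunk type; records are split into
+	// chunks at 32 KiB block boundaries, which is where the extra 7-byte
+	// headers in the arithmetic above come from.)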
+ if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 10024; got != want { + t.Fatalf("buffer length #3: got %d want %d", got, want) + } + // Do a bigger write, one that completes the current block. + // We should now have 32768 bytes (a complete block), without + // an explicit flush. + w3, _ := w.Next() + w3.Write(bytes.Repeat([]byte("3"), 40000)) + if got, want := buf.Len(), 32768; got != want { + t.Fatalf("buffer length #4: got %d want %d", got, want) + } + // Flushing should get us up to 50038 bytes written. + // 50038 = 10024 + 2*7 + 40000. There are two headers because + // the one record was split into two chunks. + if err := w.Flush(); err != nil { + t.Fatal(err) + } + if got, want := buf.Len(), 50038; got != want { + t.Fatalf("buffer length #5: got %d want %d", got, want) + } + // Check that reading those records give the right lengths. + r := NewReader(buf, dropper{t}, true, true) + wants := []int64{1, 2, 10000, 40000} + for i, want := range wants { + rr, _ := r.Next() + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #%d: %v", i, err) + } + if n != want { + t.Fatalf("read #%d: got %d bytes want %d", i, n, want) + } + } +} + +func TestNonExhaustiveRead(t *testing.T) { + const n = 100 + buf := new(bytes.Buffer) + p := make([]byte, 10) + rnd := rand.New(rand.NewSource(1)) + + w := NewWriter(buf) + for i := 0; i < n; i++ { + length := len(p) + rnd.Intn(3*blockSize) + s := string(uint8(i)) + "123456789abcdefgh" + ww, _ := w.Next() + ww.Write([]byte(big(s, length))) + } + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf, dropper{t}, true, true) + for i := 0; i < n; i++ { + rr, _ := r.Next() + _, err := io.ReadFull(rr, p) + if err != nil { + t.Fatal(err) + } + want := string(uint8(i)) + "123456789" + if got := string(p); got != want { + t.Fatalf("read #%d: got %q want %q", i, got, want) + } + } +} + +func TestStaleReader(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w0.Write([]byte("0")) + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1.Write([]byte("11")) + if err := w.Close(); err != nil { + t.Fatal(err) + } + + r := NewReader(buf, dropper{t}, true, true) + r0, err := r.Next() + if err != nil { + t.Fatal(err) + } + r1, err := r.Next() + if err != nil { + t.Fatal(err) + } + p := make([]byte, 1) + if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale read #0: unexpected error: %v", err) + } + if _, err := r1.Read(p); err != nil { + t.Fatalf("fresh read #1: got %v want nil error", err) + } + if p[0] != '1' { + t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) + } +} + +func TestStaleWriter(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + w0, err := w.Next() + if err != nil { + t.Fatal(err) + } + w1, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := w0.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #0: unexpected error: %v", err) + } + if _, err := w1.Write([]byte("11")); err != nil { + t.Fatalf("fresh write #1: got %v want nil error", err) + } + if err := w.Flush(); err != nil { + t.Fatalf("flush: %v", err) + } + if _, err := w1.Write([]byte("0")); err == nil || !strings.Contains(err.Error(), "stale") { + t.Fatalf("stale write #1: unexpected error: %v", err) + } +} + +func TestCorrupt_MissingLastBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := 
NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-1024)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + // Cut the last block. + b := buf.Bytes()[:blockSize] + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read. + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if n != blockSize-1024 { + t.Fatalf("read #0: got %d bytes want %d", n, blockSize-1024) + } + + // Second read. + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedFirstBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #0. + for i := 0; i < 1024; i++ { + b[i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (third record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize-headerSize) + 2; n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedMiddleBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. 
+ ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #1. + for i := 0; i < 1024; i++ { + b[blockSize+i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + // Third read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #2: %v", err) + } + if want := int64(blockSize-headerSize) + 2; n != want { + t.Fatalf("read #2: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_CorruptedLastBlock(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + // Fourth record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+2)); err != nil { + t.Fatalf("write #3: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting block #3. + for i := len(b) - 1; i > len(b)-1024; i-- { + b[i] = '1' + } + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). 
+ rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize - headerSize); n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + // Third read (third record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #2: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #2: got %d bytes want %d", n, want) + } + + // Fourth read (fourth record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #3: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_FirstChuckLengthOverflow(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting record #1. + x := blockSize + binary.LittleEndian.PutUint16(b[x+4:], 0xffff) + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). + rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (second record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != io.ErrUnexpectedEOF { + t.Fatalf("read #1: unexpected error: %v", err) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} + +func TestCorrupt_MiddleChuckLengthOverflow(t *testing.T) { + buf := new(bytes.Buffer) + + w := NewWriter(buf) + + // First record. + ww, err := w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize/2)); err != nil { + t.Fatalf("write #0: unexpected error: %v", err) + } + + // Second record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), blockSize-headerSize)); err != nil { + t.Fatalf("write #1: unexpected error: %v", err) + } + + // Third record. + ww, err = w.Next() + if err != nil { + t.Fatal(err) + } + if _, err := ww.Write(bytes.Repeat([]byte("0"), (blockSize-headerSize)+1)); err != nil { + t.Fatalf("write #2: unexpected error: %v", err) + } + + if err := w.Close(); err != nil { + t.Fatal(err) + } + + b := buf.Bytes() + // Corrupting record #1. + x := blockSize/2 + headerSize + binary.LittleEndian.PutUint16(b[x+4:], 0xffff) + + r := NewReader(bytes.NewReader(b), dropper{t}, false, true) + + // First read (first record). 
+ rr, err := r.Next() + if err != nil { + t.Fatal(err) + } + n, err := io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #0: %v", err) + } + if want := int64(blockSize / 2); n != want { + t.Fatalf("read #0: got %d bytes want %d", n, want) + } + + // Second read (third record). + rr, err = r.Next() + if err != nil { + t.Fatal(err) + } + n, err = io.Copy(ioutil.Discard, rr) + if err != nil { + t.Fatalf("read #1: %v", err) + } + if want := int64(blockSize-headerSize) + 1; n != want { + t.Fatalf("read #1: got %d bytes want %d", n, want) + } + + if _, err := r.Next(); err != io.EOF { + t.Fatalf("last next: unexpected error: %v", err) + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/key_test.go new file mode 100644 index 0000000000..2f33ccb880 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/key_test.go @@ -0,0 +1,133 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + "bytes" + "testing" + + "github.com/syndtr/goleveldb/leveldb/comparer" +) + +var defaultIComparer = &iComparer{comparer.DefaultComparer} + +func ikey(key string, seq uint64, kt keyType) internalKey { + return makeInternalKey(nil, []byte(key), uint64(seq), kt) +} + +func shortSep(a, b []byte) []byte { + dst := make([]byte, len(a)) + dst = defaultIComparer.Separator(dst[:0], a, b) + if dst == nil { + return a + } + return dst +} + +func shortSuccessor(b []byte) []byte { + dst := make([]byte, len(b)) + dst = defaultIComparer.Successor(dst[:0], b) + if dst == nil { + return b + } + return dst +} + +func testSingleKey(t *testing.T, key string, seq uint64, kt keyType) { + ik := ikey(key, seq, kt) + + if !bytes.Equal(ik.ukey(), []byte(key)) { + t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) + } + + rseq, rt := ik.parseNum() + if rseq != seq { + t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) + } + if rt != kt { + t.Errorf("type does not equal, got %v, want %v", rt, kt) + } + + if rukey, rseq, rt, kerr := parseInternalKey(ik); kerr == nil { + if !bytes.Equal(rukey, []byte(key)) { + t.Errorf("user key does not equal, got %v, want %v", string(ik.ukey()), key) + } + if rseq != seq { + t.Errorf("seq number does not equal, got %v, want %v", rseq, seq) + } + if rt != kt { + t.Errorf("type does not equal, got %v, want %v", rt, kt) + } + } else { + t.Errorf("key error: %v", kerr) + } +} + +func TestInternalKey_EncodeDecode(t *testing.T) { + keys := []string{"", "k", "hello", "longggggggggggggggggggggg"} + seqs := []uint64{ + 1, 2, 3, + (1 << 8) - 1, 1 << 8, (1 << 8) + 1, + (1 << 16) - 1, 1 << 16, (1 << 16) + 1, + (1 << 32) - 1, 1 << 32, (1 << 32) + 1, + } + for _, key := range keys { + for _, seq := range seqs { + testSingleKey(t, key, seq, keyTypeVal) + testSingleKey(t, "hello", 1, keyTypeDel) + } + } +} + +func assertBytes(t *testing.T, want, got []byte) { + if !bytes.Equal(got, want) { + t.Errorf("assert failed, got %v, want %v", got, want) + } +} + +func TestInternalKeyShortSeparator(t *testing.T) { + // When user keys are same + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 99, keyTypeVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 101, keyTypeVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, 
keyTypeVal), + ikey("foo", 100, keyTypeVal))) + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foo", 100, keyTypeDel))) + + // When user keys are misordered + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("bar", 99, keyTypeVal))) + + // When user keys are different, but correctly ordered + assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("hello", 200, keyTypeVal))) + + // When start user key is prefix of limit user key + assertBytes(t, ikey("foo", 100, keyTypeVal), + shortSep(ikey("foo", 100, keyTypeVal), + ikey("foobar", 200, keyTypeVal))) + + // When limit user key is prefix of start user key + assertBytes(t, ikey("foobar", 100, keyTypeVal), + shortSep(ikey("foobar", 100, keyTypeVal), + ikey("foo", 200, keyTypeVal))) +} + +func TestInternalKeyShortestSuccessor(t *testing.T) { + assertBytes(t, ikey("g", uint64(keyMaxSeq), keyTypeSeek), + shortSuccessor(ikey("foo", 100, keyTypeVal))) + assertBytes(t, ikey("\xff\xff", 100, keyTypeVal), + shortSuccessor(ikey("\xff\xff", 100, keyTypeVal))) +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go new file mode 100644 index 0000000000..fefa007a70 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/leveldb_suite_test.go @@ -0,0 +1,11 @@ +package leveldb + +import ( + "testing" + + "github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestLevelDB(t *testing.T) { + testutil.RunSuite(t, "LevelDB Suite") +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go new file mode 100644 index 0000000000..b05084caa6 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/bench_test.go @@ -0,0 +1,75 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +package memdb + +import ( + "encoding/binary" + "math/rand" + "testing" + + "github.com/syndtr/goleveldb/leveldb/comparer" +) + +func BenchmarkPut(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + b.ResetTimer() + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } +} + +func BenchmarkPutRandom(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(rand.Int())) + } + + b.ResetTimer() + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } +} + +func BenchmarkGet(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } + + b.ResetTimer() + for i := range buf { + p.Get(buf[i][:]) + } +} + +func BenchmarkGetRandom(b *testing.B) { + buf := make([][4]byte, b.N) + for i := range buf { + binary.LittleEndian.PutUint32(buf[i][:], uint32(i)) + } + + p := New(comparer.DefaultComparer, 0) + for i := range buf { + p.Put(buf[i][:], nil) + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + p.Get(buf[rand.Int()%b.N][:]) + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go new file mode 100644 index 0000000000..18c304b7f1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_suite_test.go @@ -0,0 +1,11 @@ +package memdb + +import ( + "testing" + + "github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestMemDB(t *testing.T) { + testutil.RunSuite(t, "MemDB Suite") +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go new file mode 100644 index 0000000000..5dd6dbc7b7 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb_test.go @@ -0,0 +1,135 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package memdb + +import ( + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/comparer" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/syndtr/goleveldb/leveldb/util" +) + +func (p *DB) TestFindLT(key []byte) (rkey, value []byte, err error) { + p.mu.RLock() + if node := p.findLT(key); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +func (p *DB) TestFindLast() (rkey, value []byte, err error) { + p.mu.RLock() + if node := p.findLast(); node != 0 { + n := p.nodeData[node] + m := n + p.nodeData[node+nKey] + rkey = p.kvData[n:m] + value = p.kvData[m : m+p.nodeData[node+nVal]] + } else { + err = ErrNotFound + } + p.mu.RUnlock() + return +} + +func (p *DB) TestPut(key []byte, value []byte) error { + p.Put(key, value) + return nil +} + +func (p *DB) TestDelete(key []byte) error { + p.Delete(key) + return nil +} + +func (p *DB) TestFind(key []byte) (rkey, rvalue []byte, err error) { + return p.Find(key) +} + +func (p *DB) TestGet(key []byte) (value []byte, err error) { + return p.Get(key) +} + +func (p *DB) TestNewIterator(slice *util.Range) iterator.Iterator { + return p.NewIterator(slice) +} + +var _ = testutil.Defer(func() { + Describe("Memdb", func() { + Describe("write test", func() { + It("should do write correctly", func() { + db := New(comparer.DefaultComparer, 0) + t := testutil.DBTesting{ + DB: db, + Deleted: testutil.KeyValue_Generate(nil, 1000, 1, 30, 5, 5).Clone(), + PostFn: func(t *testutil.DBTesting) { + Expect(db.Len()).Should(Equal(t.Present.Len())) + Expect(db.Size()).Should(Equal(t.Present.Size())) + switch t.Act { + case testutil.DBPut, testutil.DBOverwrite: + Expect(db.Contains(t.ActKey)).Should(BeTrue()) + default: + Expect(db.Contains(t.ActKey)).Should(BeFalse()) + } + }, + } + testutil.DoDBTesting(&t) + }) + }) + + Describe("read test", func() { + testutil.AllKeyValueTesting(nil, func(kv testutil.KeyValue) testutil.DB { + // Building the DB. + db := New(comparer.DefaultComparer, 0) + kv.IterateShuffled(nil, func(i int, key, value []byte) { + db.Put(key, value) + }) + + if kv.Len() > 1 { + It("Should find correct keys with findLT", func() { + testutil.ShuffledIndex(nil, kv.Len()-1, 1, func(i int) { + key_, key, _ := kv.IndexInexact(i + 1) + expectedKey, expectedValue := kv.Index(i) + + // Using key that exist. + rkey, rvalue, err := db.TestFindLT(key) + Expect(err).ShouldNot(HaveOccurred(), "Error for key %q -> %q", key, expectedKey) + Expect(rkey).Should(Equal(expectedKey), "Key") + Expect(rvalue).Should(Equal(expectedValue), "Value for key %q -> %q", key, expectedKey) + + // Using key that doesn't exist. 
+ rkey, rvalue, err = db.TestFindLT(key_)
+ Expect(err).ShouldNot(HaveOccurred(), "Error for key %q (%q) -> %q", key_, key, expectedKey)
+ Expect(rkey).Should(Equal(expectedKey))
+ Expect(rvalue).Should(Equal(expectedValue), "Value for key %q (%q) -> %q", key_, key, expectedKey)
+ })
+ })
+ }
+
+ if kv.Len() > 0 {
+ It("Should find last key with findLast", func() {
+ key, value := kv.Index(kv.Len() - 1)
+ rkey, rvalue, err := db.TestFindLast()
+ Expect(err).ShouldNot(HaveOccurred())
+ Expect(rkey).Should(Equal(key))
+ Expect(rvalue).Should(Equal(value))
+ })
+ }
+
+ return db
+ }, nil, nil)
+ })
+ })
+})
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go
new file mode 100644
index 0000000000..5af399f063
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record_test.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "bytes"
+ "testing"
+)
+
+func decodeEncode(v *sessionRecord) (res bool, err error) {
+ b := new(bytes.Buffer)
+ err = v.encode(b)
+ if err != nil {
+ return
+ }
+ // decode consumes b below, so snapshot the encoded bytes first.
+ enc := append([]byte(nil), b.Bytes()...)
+ v2 := &sessionRecord{}
+ err = v2.decode(b)
+ if err != nil {
+ return
+ }
+ b2 := new(bytes.Buffer)
+ err = v2.encode(b2)
+ if err != nil {
+ return
+ }
+ return bytes.Equal(enc, b2.Bytes()), nil
+}
+
+func TestSessionRecord_EncodeDecode(t *testing.T) {
+ big := int64(1) << 50
+ v := &sessionRecord{}
+ i := int64(0)
+ test := func() {
+ res, err := decodeEncode(v)
+ if err != nil {
+ t.Fatalf("error when testing encode/decode sessionRecord: %v", err)
+ }
+ if !res {
+ t.Error("encode/decode test failed at iteration:", i)
+ }
+ }
+
+ for ; i < 4; i++ {
+ test()
+ v.addTable(3, big+300+i, big+400+i,
+ makeInternalKey(nil, []byte("foo"), uint64(big+500+1), keyTypeVal),
+ makeInternalKey(nil, []byte("zoo"), uint64(big+600+1), keyTypeDel))
+ v.delTable(4, big+700+i)
+ v.addCompPtr(int(i), makeInternalKey(nil, []byte("x"), uint64(big+900+1), keyTypeVal))
+ }
+
+ v.setComparer("foo")
+ v.setJournalNum(big + 100)
+ v.setPrevJournalNum(big + 99)
+ v.setNextFileNum(big + 200)
+ v.setSeqNum(uint64(big + 1000))
+ test()
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go
new file mode 100644
index 0000000000..7a77f28918
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_test.go
@@ -0,0 +1,176 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+ +package storage + +import ( + "fmt" + "os" + "path/filepath" + "testing" +) + +var cases = []struct { + oldName []string + name string + ftype FileType + num int64 +}{ + {nil, "000100.log", TypeJournal, 100}, + {nil, "000000.log", TypeJournal, 0}, + {[]string{"000000.sst"}, "000000.ldb", TypeTable, 0}, + {nil, "MANIFEST-000002", TypeManifest, 2}, + {nil, "MANIFEST-000007", TypeManifest, 7}, + {nil, "9223372036854775807.log", TypeJournal, 9223372036854775807}, + {nil, "000100.tmp", TypeTemp, 100}, +} + +var invalidCases = []string{ + "", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop", +} + +func TestFileStorage_CreateFileName(t *testing.T) { + for _, c := range cases { + if name := fsGenName(FileDesc{c.ftype, c.num}); name != c.name { + t.Errorf("invalid filename got '%s', want '%s'", name, c.name) + } + } +} + +func TestFileStorage_ParseFileName(t *testing.T) { + for _, c := range cases { + for _, name := range append([]string{c.name}, c.oldName...) { + fd, ok := fsParseName(name) + if !ok { + t.Errorf("cannot parse filename '%s'", name) + continue + } + if fd.Type != c.ftype { + t.Errorf("filename '%s' invalid type got '%d', want '%d'", name, fd.Type, c.ftype) + } + if fd.Num != c.num { + t.Errorf("filename '%s' invalid number got '%d', want '%d'", name, fd.Num, c.num) + } + } + } +} + +func TestFileStorage_InvalidFileName(t *testing.T) { + for _, name := range invalidCases { + if fsParseNamePtr(name, nil) { + t.Errorf("filename '%s' should be invalid", name) + } + } +} + +func TestFileStorage_Locking(t *testing.T) { + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrwlock-%d", os.Getuid())) + if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) { + t.Fatal("RemoveAll: got error: ", err) + } + defer os.RemoveAll(path) + + p1, err := OpenFile(path, false) + if err != nil { + t.Fatal("OpenFile(1): got error: ", err) + } + + p2, err := OpenFile(path, false) + if err != nil { + t.Logf("OpenFile(2): got error: %s (expected)", err) + } else { + p2.Close() + p1.Close() + t.Fatal("OpenFile(2): expect error") + } + + p1.Close() + + p3, err := OpenFile(path, false) + if err != nil { + t.Fatal("OpenFile(3): got error: ", err) + } + defer p3.Close() + + l, err := p3.Lock() + if err != nil { + t.Fatal("storage lock failed(1): ", err) + } + _, err = p3.Lock() + if err == nil { + t.Fatal("expect error for second storage lock attempt") + } else { + t.Logf("storage lock got error: %s (expected)", err) + } + l.Release() + _, err = p3.Lock() + if err != nil { + t.Fatal("storage lock failed(2): ", err) + } +} + +func TestFileStorage_ReadOnlyLocking(t *testing.T) { + path := filepath.Join(os.TempDir(), fmt.Sprintf("goleveldb-testrolock-%d", os.Getuid())) + if err := os.RemoveAll(path); err != nil && !os.IsNotExist(err) { + t.Fatal("RemoveAll: got error: ", err) + } + defer os.RemoveAll(path) + + p1, err := OpenFile(path, false) + if err != nil { + t.Fatal("OpenFile(1): got error: ", err) + } + + _, err = OpenFile(path, true) + if err != nil { + t.Logf("OpenFile(2): got error: %s (expected)", err) + } else { + t.Fatal("OpenFile(2): expect error") + } + + p1.Close() + + p3, err := OpenFile(path, true) + if err != nil { + t.Fatal("OpenFile(3): got error: ", err) + } + + p4, err := OpenFile(path, true) + if err != nil { + t.Fatal("OpenFile(4): 
got error: ", err)
+ }
+
+ _, err = OpenFile(path, false)
+ if err != nil {
+ t.Logf("OpenFile(5): got error: %s (expected)", err)
+ } else {
+ t.Fatal("OpenFile(5): expect error")
+ }
+
+ p3.Close()
+ p4.Close()
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
new file mode 100644
index 0000000000..7295075866
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage_test.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "bytes"
+ "testing"
+)
+
+func TestMemStorage(t *testing.T) {
+ m := NewMemStorage()
+
+ l, err := m.Lock()
+ if err != nil {
+ t.Fatal("storage lock failed(1): ", err)
+ }
+ _, err = m.Lock()
+ if err == nil {
+ t.Fatal("expect error for second storage lock attempt")
+ } else {
+ t.Logf("storage lock got error: %s (expected)", err)
+ }
+ l.Release()
+ _, err = m.Lock()
+ if err != nil {
+ t.Fatal("storage lock failed(2): ", err)
+ }
+
+ w, err := m.Create(FileDesc{TypeTable, 1})
+ if err != nil {
+ t.Fatal("Storage.Create: ", err)
+ }
+ w.Write([]byte("abc"))
+ w.Close()
+ if fds, _ := m.List(TypeAll); len(fds) != 1 {
+ t.Fatal("invalid List len")
+ }
+ buf := new(bytes.Buffer)
+ r, err := m.Open(FileDesc{TypeTable, 1})
+ if err != nil {
+ t.Fatal("Open: got error: ", err)
+ }
+ buf.ReadFrom(r)
+ r.Close()
+ if got := buf.String(); got != "abc" {
+ t.Fatalf("Read: invalid value, want=abc got=%s", got)
+ }
+ if _, err := m.Open(FileDesc{TypeTable, 1}); err != nil {
+ t.Fatal("Open: got error: ", err)
+ }
+ if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil {
+ t.Fatal("expecting error")
+ }
+ m.Remove(FileDesc{TypeTable, 1})
+ if fds, _ := m.List(TypeAll); len(fds) != 0 {
+ t.Fatal("invalid List len", len(fds))
+ }
+ if _, err := m.Open(FileDesc{TypeTable, 1}); err == nil {
+ t.Fatal("expecting error")
+ }
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go
new file mode 100644
index 0000000000..00e6f9eea0
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/block_test.go
@@ -0,0 +1,139 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ . "github.com/onsi/ginkgo"
+ . "github.com/onsi/gomega"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/testutil"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type blockTesting struct {
+ tr *Reader
+ b *block
+}
+
+func (t *blockTesting) TestNewIterator(slice *util.Range) iterator.Iterator {
+ return t.tr.newBlockIter(t.b, nil, slice, false)
+}
+
+var _ = testutil.Defer(func() {
+ Describe("Block", func() {
+ Build := func(kv *testutil.KeyValue, restartInterval int) *blockTesting {
+ // Building the block.
+ bw := &blockWriter{
+ restartInterval: restartInterval,
+ scratch: make([]byte, 30),
+ }
+ kv.Iterate(func(i int, key, value []byte) {
+ bw.append(key, value)
+ })
+ bw.finish()
+
+ // Opening the block.
+ data := bw.buf.Bytes() + restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:])) + return &blockTesting{ + tr: &Reader{cmp: comparer.DefaultComparer}, + b: &block{ + data: data, + restartsLen: restartsLen, + restartsOffset: len(data) - (restartsLen+1)*4, + }, + } + } + + Describe("read test", func() { + for restartInterval := 1; restartInterval <= 5; restartInterval++ { + Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { + kv := &testutil.KeyValue{} + Text := func() string { + return fmt.Sprintf("and %d keys", kv.Len()) + } + + Test := func() { + // Make block. + br := Build(kv, restartInterval) + // Do testing. + testutil.KeyValueTesting(nil, kv.Clone(), br, nil, nil) + } + + Describe(Text(), Test) + + kv.PutString("", "empty") + Describe(Text(), Test) + + kv.PutString("a1", "foo") + Describe(Text(), Test) + + kv.PutString("a2", "v") + Describe(Text(), Test) + + kv.PutString("a3qqwrkks", "hello") + Describe(Text(), Test) + + kv.PutString("a4", "bar") + Describe(Text(), Test) + + kv.PutString("a5111111", "v5") + kv.PutString("a6", "") + kv.PutString("a7", "v7") + kv.PutString("a8", "vvvvvvvvvvvvvvvvvvvvvv8") + kv.PutString("b", "v9") + kv.PutString("c9", "v9") + kv.PutString("c91", "v9") + kv.PutString("d0", "v9") + Describe(Text(), Test) + }) + } + }) + + Describe("out-of-bound slice test", func() { + kv := &testutil.KeyValue{} + kv.PutString("k1", "v1") + kv.PutString("k2", "v2") + kv.PutString("k3abcdefgg", "v3") + kv.PutString("k4", "v4") + kv.PutString("k5", "v5") + for restartInterval := 1; restartInterval <= 5; restartInterval++ { + Describe(fmt.Sprintf("with restart interval of %d", restartInterval), func() { + // Make block. + bt := Build(kv, restartInterval) + + Test := func(r *util.Range) func(done Done) { + return func(done Done) { + iter := bt.TestNewIterator(r) + Expect(iter.Error()).ShouldNot(HaveOccurred()) + + t := testutil.IteratorTesting{ + KeyValue: kv.Clone(), + Iter: iter, + } + + testutil.DoIteratorTesting(&t) + iter.Release() + done <- true + } + } + + It("Should do iterations and seeks correctly #0", + Test(&util.Range{Start: []byte("k0"), Limit: []byte("k6")}), 2.0) + + It("Should do iterations and seeks correctly #1", + Test(&util.Range{Start: []byte(""), Limit: []byte("zzzzzzz")}), 2.0) + }) + } + }) + }) +}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go new file mode 100644 index 0000000000..6465da6e37 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table_suite_test.go @@ -0,0 +1,11 @@ +package table + +import ( + "testing" + + "github.com/syndtr/goleveldb/leveldb/testutil" +) + +func TestTable(t *testing.T) { + testutil.RunSuite(t, "Table Suite") +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go new file mode 100644 index 0000000000..1bc73ed82c --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table_test.go @@ -0,0 +1,123 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package table + +import ( + "bytes" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/storage" + "github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type tableWrapper struct { + *Reader +} + +func (t tableWrapper) TestFind(key []byte) (rkey, rvalue []byte, err error) { + return t.Reader.Find(key, false, nil) +} + +func (t tableWrapper) TestGet(key []byte) (value []byte, err error) { + return t.Reader.Get(key, nil) +} + +func (t tableWrapper) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.Reader.NewIterator(slice, nil) +} + +var _ = testutil.Defer(func() { + Describe("Table", func() { + Describe("approximate offset test", func() { + var ( + buf = &bytes.Buffer{} + o = &opt.Options{ + BlockSize: 1024, + Compression: opt.NoCompression, + } + ) + + // Building the table. + tw := NewWriter(buf, o) + tw.Append([]byte("k01"), []byte("hello")) + tw.Append([]byte("k02"), []byte("hello2")) + tw.Append([]byte("k03"), bytes.Repeat([]byte{'x'}, 10000)) + tw.Append([]byte("k04"), bytes.Repeat([]byte{'x'}, 200000)) + tw.Append([]byte("k05"), bytes.Repeat([]byte{'x'}, 300000)) + tw.Append([]byte("k06"), []byte("hello3")) + tw.Append([]byte("k07"), bytes.Repeat([]byte{'x'}, 100000)) + err := tw.Close() + + It("Should be able to approximate offset of a key correctly", func() { + Expect(err).ShouldNot(HaveOccurred()) + + tr, err := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o) + Expect(err).ShouldNot(HaveOccurred()) + CheckOffset := func(key string, expect, threshold int) { + offset, err := tr.OffsetOf([]byte(key)) + Expect(err).ShouldNot(HaveOccurred()) + Expect(offset).Should(BeNumerically("~", expect, threshold), "Offset of key %q", key) + } + + CheckOffset("k0", 0, 0) + CheckOffset("k01a", 0, 0) + CheckOffset("k02", 0, 0) + CheckOffset("k03", 0, 0) + CheckOffset("k04", 10000, 1000) + CheckOffset("k04a", 210000, 1000) + CheckOffset("k05", 210000, 1000) + CheckOffset("k06", 510000, 1000) + CheckOffset("k07", 510000, 1000) + CheckOffset("xyz", 610000, 2000) + }) + }) + + Describe("read test", func() { + Build := func(kv testutil.KeyValue) testutil.DB { + o := &opt.Options{ + BlockSize: 512, + BlockRestartInterval: 3, + } + buf := &bytes.Buffer{} + + // Building the table. + tw := NewWriter(buf, o) + kv.Iterate(func(i int, key, value []byte) { + tw.Append(key, value) + }) + tw.Close() + + // Opening the table. 
+ tr, _ := NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), storage.FileDesc{}, nil, nil, o) + return tableWrapper{tr} + } + Test := func(kv *testutil.KeyValue, body func(r *Reader)) func() { + return func() { + db := Build(*kv) + if body != nil { + body(db.(tableWrapper).Reader) + } + testutil.KeyValueTesting(nil, *kv, db, nil, nil) + } + } + + testutil.AllKeyValueTesting(nil, Build, nil, nil) + Describe("with one key per block", Test(testutil.KeyValue_Generate(nil, 9, 1, 10, 512, 512), func(r *Reader) { + It("should have correct blocks number", func() { + indexBlock, err := r.readBlock(r.indexBH, true) + Expect(err).To(BeNil()) + Expect(indexBlock.restartsLen).Should(Equal(9)) + }) + })) + }) + }) +}) diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go new file mode 100644 index 0000000000..c8cb44c449 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/testutil_test.go @@ -0,0 +1,91 @@ +// Copyright (c) 2014, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package leveldb + +import ( + . "github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/testutil" + "github.com/syndtr/goleveldb/leveldb/util" +) + +type testingDB struct { + *DB + ro *opt.ReadOptions + wo *opt.WriteOptions + stor *testutil.Storage +} + +func (t *testingDB) TestPut(key []byte, value []byte) error { + return t.Put(key, value, t.wo) +} + +func (t *testingDB) TestDelete(key []byte) error { + return t.Delete(key, t.wo) +} + +func (t *testingDB) TestGet(key []byte) (value []byte, err error) { + return t.Get(key, t.ro) +} + +func (t *testingDB) TestHas(key []byte) (ret bool, err error) { + return t.Has(key, t.ro) +} + +func (t *testingDB) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.NewIterator(slice, t.ro) +} + +func (t *testingDB) TestClose() { + err := t.Close() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + err = t.stor.Close() + ExpectWithOffset(1, err).NotTo(HaveOccurred()) +} + +func newTestingDB(o *opt.Options, ro *opt.ReadOptions, wo *opt.WriteOptions) *testingDB { + stor := testutil.NewStorage() + db, err := Open(stor, o) + // FIXME: This may be called from outside It, which may cause panic. 
+ Expect(err).NotTo(HaveOccurred()) + return &testingDB{ + DB: db, + ro: ro, + wo: wo, + stor: stor, + } +} + +type testingTransaction struct { + *Transaction + ro *opt.ReadOptions + wo *opt.WriteOptions +} + +func (t *testingTransaction) TestPut(key []byte, value []byte) error { + return t.Put(key, value, t.wo) +} + +func (t *testingTransaction) TestDelete(key []byte) error { + return t.Delete(key, t.wo) +} + +func (t *testingTransaction) TestGet(key []byte) (value []byte, err error) { + return t.Get(key, t.ro) +} + +func (t *testingTransaction) TestHas(key []byte) (ret bool, err error) { + return t.Has(key, t.ro) +} + +func (t *testingTransaction) TestNewIterator(slice *util.Range) iterator.Iterator { + return t.NewIterator(slice, t.ro) +} + +func (t *testingTransaction) TestClose() {} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go new file mode 100644 index 0000000000..87d96739c4 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/buffer_test.go @@ -0,0 +1,369 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package util + +import ( + "bytes" + "io" + "math/rand" + "runtime" + "testing" +) + +const N = 10000 // make this bigger for a larger (and slower) test +var data string // test data for write tests +var testBytes []byte // test data; same as data but as a slice. + +func init() { + testBytes = make([]byte, N) + for i := 0; i < N; i++ { + testBytes[i] = 'a' + byte(i%26) + } + data = string(testBytes) +} + +// Verify that contents of buf match the string s. +func check(t *testing.T, testname string, buf *Buffer, s string) { + bytes := buf.Bytes() + str := buf.String() + if buf.Len() != len(bytes) { + t.Errorf("%s: buf.Len() == %d, len(buf.Bytes()) == %d", testname, buf.Len(), len(bytes)) + } + + if buf.Len() != len(str) { + t.Errorf("%s: buf.Len() == %d, len(buf.String()) == %d", testname, buf.Len(), len(str)) + } + + if buf.Len() != len(s) { + t.Errorf("%s: buf.Len() == %d, len(s) == %d", testname, buf.Len(), len(s)) + } + + if string(bytes) != s { + t.Errorf("%s: string(buf.Bytes()) == %q, s == %q", testname, string(bytes), s) + } +} + +// Fill buf through n writes of byte slice fub. +// The initial contents of buf corresponds to the string s; +// the result is the final contents of buf returned as a string. +func fillBytes(t *testing.T, testname string, buf *Buffer, s string, n int, fub []byte) string { + check(t, testname+" (fill 1)", buf, s) + for ; n > 0; n-- { + m, err := buf.Write(fub) + if m != len(fub) { + t.Errorf(testname+" (fill 2): m == %d, expected %d", m, len(fub)) + } + if err != nil { + t.Errorf(testname+" (fill 3): err should always be nil, found err == %s", err) + } + s += string(fub) + check(t, testname+" (fill 4)", buf, s) + } + return s +} + +func TestNewBuffer(t *testing.T) { + buf := NewBuffer(testBytes) + check(t, "NewBuffer", buf, data) +} + +// Empty buf through repeated reads into fub. +// The initial contents of buf corresponds to the string s. 
+func empty(t *testing.T, testname string, buf *Buffer, s string, fub []byte) {
+ check(t, testname+" (empty 1)", buf, s)
+
+ for {
+ n, err := buf.Read(fub)
+ if n == 0 {
+ break
+ }
+ if err != nil {
+ t.Errorf(testname+" (empty 2): err should always be nil, found err == %s", err)
+ }
+ s = s[n:]
+ check(t, testname+" (empty 3)", buf, s)
+ }
+
+ check(t, testname+" (empty 4)", buf, "")
+}
+
+func TestBasicOperations(t *testing.T) {
+ var buf Buffer
+
+ for i := 0; i < 5; i++ {
+ check(t, "TestBasicOperations (1)", &buf, "")
+
+ buf.Reset()
+ check(t, "TestBasicOperations (2)", &buf, "")
+
+ buf.Truncate(0)
+ check(t, "TestBasicOperations (3)", &buf, "")
+
+ n, err := buf.Write([]byte(data[0:1]))
+ if n != 1 {
+ t.Errorf("wrote 1 byte, but n == %d", n)
+ }
+ if err != nil {
+ t.Errorf("err should always be nil, but err == %s", err)
+ }
+ check(t, "TestBasicOperations (4)", &buf, "a")
+
+ buf.WriteByte(data[1])
+ check(t, "TestBasicOperations (5)", &buf, "ab")
+
+ n, err = buf.Write([]byte(data[2:26]))
+ if n != 24 {
+ t.Errorf("wrote 24 bytes, but n == %d", n)
+ }
+ check(t, "TestBasicOperations (6)", &buf, string(data[0:26]))
+
+ buf.Truncate(26)
+ check(t, "TestBasicOperations (7)", &buf, string(data[0:26]))
+
+ buf.Truncate(20)
+ check(t, "TestBasicOperations (8)", &buf, string(data[0:20]))
+
+ empty(t, "TestBasicOperations (9)", &buf, string(data[0:20]), make([]byte, 5))
+ empty(t, "TestBasicOperations (10)", &buf, "", make([]byte, 100))
+
+ buf.WriteByte(data[1])
+ c, err := buf.ReadByte()
+ if err != nil {
+ t.Error("ReadByte unexpected eof")
+ }
+ if c != data[1] {
+ t.Errorf("ReadByte wrong value c=%v", c)
+ }
+ c, err = buf.ReadByte()
+ if err == nil {
+ t.Error("ReadByte unexpected not eof")
+ }
+ }
+}
+
+func TestLargeByteWrites(t *testing.T) {
+ var buf Buffer
+ limit := 30
+ if testing.Short() {
+ limit = 9
+ }
+ for i := 3; i < limit; i += 3 {
+ s := fillBytes(t, "TestLargeWrites (1)", &buf, "", 5, testBytes)
+ empty(t, "TestLargeByteWrites (2)", &buf, s, make([]byte, len(data)/i))
+ }
+ check(t, "TestLargeByteWrites (3)", &buf, "")
+}
+
+func TestLargeByteReads(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestLargeReads (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ empty(t, "TestLargeReads (2)", &buf, s, make([]byte, len(data)))
+ }
+ check(t, "TestLargeByteReads (3)", &buf, "")
+}
+
+func TestMixedReadsAndWrites(t *testing.T) {
+ var buf Buffer
+ s := ""
+ for i := 0; i < 50; i++ {
+ wlen := rand.Intn(len(data))
+ s = fillBytes(t, "TestMixedReadsAndWrites (1)", &buf, s, 1, testBytes[0:wlen])
+ rlen := rand.Intn(len(data))
+ fub := make([]byte, rlen)
+ n, _ := buf.Read(fub)
+ s = s[n:]
+ }
+ empty(t, "TestMixedReadsAndWrites (2)", &buf, s, make([]byte, buf.Len()))
+}
+
+func TestNil(t *testing.T) {
+ var b *Buffer
+ if b.String() != "<nil>" {
+ t.Errorf("expected <nil>; got %q", b.String())
+ }
+}
+
+func TestReadFrom(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestReadFrom (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ var b Buffer
+ b.ReadFrom(&buf)
+ empty(t, "TestReadFrom (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+func TestWriteTo(t *testing.T) {
+ var buf Buffer
+ for i := 3; i < 30; i += 3 {
+ s := fillBytes(t, "TestWriteTo (1)", &buf, "", 5, testBytes[0:len(testBytes)/i])
+ var b Buffer
+ buf.WriteTo(&b)
+ empty(t, "TestWriteTo (2)", &b, s, make([]byte, len(data)))
+ }
+}
+
+func TestNext(t *testing.T) {
+ b := []byte{0, 1, 2, 3, 4}
+ tmp := make([]byte, 5)
+ for i := 0; i <= 5;
i++ { + for j := i; j <= 5; j++ { + for k := 0; k <= 6; k++ { + // 0 <= i <= j <= 5; 0 <= k <= 6 + // Check that if we start with a buffer + // of length j at offset i and ask for + // Next(k), we get the right bytes. + buf := NewBuffer(b[0:j]) + n, _ := buf.Read(tmp[0:i]) + if n != i { + t.Fatalf("Read %d returned %d", i, n) + } + bb := buf.Next(k) + want := k + if want > j-i { + want = j - i + } + if len(bb) != want { + t.Fatalf("in %d,%d: len(Next(%d)) == %d", i, j, k, len(bb)) + } + for l, v := range bb { + if v != byte(l+i) { + t.Fatalf("in %d,%d: Next(%d)[%d] = %d, want %d", i, j, k, l, v, l+i) + } + } + } + } + } +} + +var readBytesTests = []struct { + buffer string + delim byte + expected []string + err error +}{ + {"", 0, []string{""}, io.EOF}, + {"a\x00", 0, []string{"a\x00"}, nil}, + {"abbbaaaba", 'b', []string{"ab", "b", "b", "aaab"}, nil}, + {"hello\x01world", 1, []string{"hello\x01"}, nil}, + {"foo\nbar", 0, []string{"foo\nbar"}, io.EOF}, + {"alpha\nbeta\ngamma\n", '\n', []string{"alpha\n", "beta\n", "gamma\n"}, nil}, + {"alpha\nbeta\ngamma", '\n', []string{"alpha\n", "beta\n", "gamma"}, io.EOF}, +} + +func TestReadBytes(t *testing.T) { + for _, test := range readBytesTests { + buf := NewBuffer([]byte(test.buffer)) + var err error + for _, expected := range test.expected { + var bytes []byte + bytes, err = buf.ReadBytes(test.delim) + if string(bytes) != expected { + t.Errorf("expected %q, got %q", expected, bytes) + } + if err != nil { + break + } + } + if err != test.err { + t.Errorf("expected error %v, got %v", test.err, err) + } + } +} + +func TestGrow(t *testing.T) { + x := []byte{'x'} + y := []byte{'y'} + tmp := make([]byte, 72) + for _, startLen := range []int{0, 100, 1000, 10000, 100000} { + xBytes := bytes.Repeat(x, startLen) + for _, growLen := range []int{0, 100, 1000, 10000, 100000} { + buf := NewBuffer(xBytes) + // If we read, this affects buf.off, which is good to test. + readBytes, _ := buf.Read(tmp) + buf.Grow(growLen) + yBytes := bytes.Repeat(y, growLen) + // Check no allocation occurs in write, as long as we're single-threaded. + var m1, m2 runtime.MemStats + runtime.ReadMemStats(&m1) + buf.Write(yBytes) + runtime.ReadMemStats(&m2) + if runtime.GOMAXPROCS(-1) == 1 && m1.Mallocs != m2.Mallocs { + t.Errorf("allocation occurred during write") + } + // Check that buffer has correct data. + if !bytes.Equal(buf.Bytes()[0:startLen-readBytes], xBytes[readBytes:]) { + t.Errorf("bad initial data at %d %d", startLen, growLen) + } + if !bytes.Equal(buf.Bytes()[startLen-readBytes:startLen-readBytes+growLen], yBytes) { + t.Errorf("bad written data at %d %d", startLen, growLen) + } + } + } +} + +// Was a bug: used to give EOF reading empty slice at EOF. +func TestReadEmptyAtEOF(t *testing.T) { + b := new(Buffer) + slice := make([]byte, 0) + n, err := b.Read(slice) + if err != nil { + t.Errorf("read error: %v", err) + } + if n != 0 { + t.Errorf("wrong count; got %d want 0", n) + } +} + +// Tests that we occasionally compact. Issue 5154. +func TestBufferGrowth(t *testing.T) { + var b Buffer + buf := make([]byte, 1024) + b.Write(buf[0:1]) + var cap0 int + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + if i == 0 { + cap0 = cap(b.buf) + } + } + cap1 := cap(b.buf) + // (*Buffer).grow allows for 2x capacity slop before sliding, + // so set our error threshold at 3x. + if cap1 > cap0*3 { + t.Errorf("buffer cap = %d; too big (grew from %d)", cap1, cap0) + } +} + +// From Issue 5154. 
+func BenchmarkBufferNotEmptyWriteRead(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf[0:1]) + for i := 0; i < 5<<10; i++ { + b.Write(buf) + b.Read(buf) + } + } +} + +// Check that we don't compact too often. From Issue 5154. +func BenchmarkBufferFullSmallReads(b *testing.B) { + buf := make([]byte, 1024) + for i := 0; i < b.N; i++ { + var b Buffer + b.Write(buf) + for b.Len()+20 < cap(b.buf) { + b.Write(buf[:10]) + } + for i := 0; i < 5<<10; i++ { + b.Read(buf[:1]) + b.Write(buf[:1]) + } + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go new file mode 100644 index 0000000000..a35d273ee1 --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/util/hash_test.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012, Suryandaru Triandana +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +package util + +import ( + "testing" +) + +var hashTests = []struct { + data []byte + seed uint32 + hash uint32 +}{ + {nil, 0xbc9f1d34, 0xbc9f1d34}, + {[]byte{0x62}, 0xbc9f1d34, 0xef1345c4}, + {[]byte{0xc3, 0x97}, 0xbc9f1d34, 0x5b663814}, + {[]byte{0xe2, 0x99, 0xa5}, 0xbc9f1d34, 0x323c078f}, + {[]byte{0xe1, 0x80, 0xb9, 0x32}, 0xbc9f1d34, 0xed21633a}, + {[]byte{ + 0x01, 0xc0, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x14, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x14, + 0x00, 0x00, 0x00, 0x18, + 0x28, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x02, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + }, 0x12345678, 0xf333dabb}, +} + +func TestHash(t *testing.T) { + for i, x := range hashTests { + h := Hash(x.data, x.seed) + if h != x.hash { + t.Fatalf("test-%d: invalid hash, %#x vs %#x", i, h, x.hash) + } + } +} diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/version_test.go b/vendor/github.com/syndtr/goleveldb/leveldb/version_test.go new file mode 100644 index 0000000000..a643be104e --- /dev/null +++ b/vendor/github.com/syndtr/goleveldb/leveldb/version_test.go @@ -0,0 +1,181 @@ +package leveldb + +import ( + "encoding/binary" + "reflect" + "testing" + + "github.com/onsi/gomega" + + "github.com/syndtr/goleveldb/leveldb/testutil" +) + +type testFileRec struct { + level int + num int64 +} + +func TestVersionStaging(t *testing.T) { + gomega.RegisterTestingT(t) + stor := testutil.NewStorage() + defer stor.Close() + s, err := newSession(stor, nil) + if err != nil { + t.Fatal(err) + } + + v := newVersion(s) + v.newStaging() + + tmp := make([]byte, 4) + mik := func(i uint64) []byte { + binary.BigEndian.PutUint32(tmp, uint32(i)) + return []byte(makeInternalKey(nil, tmp, 0, keyTypeVal)) + } + + for i, x := range []struct { + add, del []testFileRec + levels [][]int64 + }{ + { + add: []testFileRec{ + {1, 1}, + }, + levels: [][]int64{ + {}, + {1}, + }, + }, + { + add: []testFileRec{ + {1, 1}, + }, + levels: [][]int64{ + {}, + {1}, + }, + }, + { + del: []testFileRec{ + {1, 1}, + }, + levels: [][]int64{}, + }, + { + add: []testFileRec{ + {0, 1}, + {0, 3}, + {0, 2}, + {2, 5}, + {1, 4}, + }, + levels: [][]int64{ + {3, 2, 1}, + {4}, + {5}, + }, + }, + { + add: []testFileRec{ + {1, 6}, + {2, 5}, + }, + del: []testFileRec{ + {0, 1}, + {0, 4}, + }, + levels: [][]int64{ + {3, 2}, + {4, 6}, + {5}, + }, + }, + { + del: []testFileRec{ + {0, 3}, + {0, 2}, + {1, 4}, + {1, 6}, + {2, 5}, + }, + levels: [][]int64{}, + }, + { + add: 
[]testFileRec{ + {0, 1}, + }, + levels: [][]int64{ + {1}, + }, + }, + { + add: []testFileRec{ + {1, 2}, + }, + levels: [][]int64{ + {1}, + {2}, + }, + }, + { + add: []testFileRec{ + {0, 3}, + }, + levels: [][]int64{ + {3, 1}, + {2}, + }, + }, + { + add: []testFileRec{ + {6, 9}, + }, + levels: [][]int64{ + {3, 1}, + {2}, + {}, + {}, + {}, + {}, + {9}, + }, + }, + { + del: []testFileRec{ + {6, 9}, + }, + levels: [][]int64{ + {3, 1}, + {2}, + }, + }, + } { + rec := &sessionRecord{} + for _, f := range x.add { + ik := mik(uint64(f.num)) + rec.addTable(f.level, f.num, 1, ik, ik) + } + for _, f := range x.del { + rec.delTable(f.level, f.num) + } + vs := v.newStaging() + vs.commit(rec) + v = vs.finish() + if len(v.levels) != len(x.levels) { + t.Fatalf("#%d: invalid level count: want=%d got=%d", i, len(x.levels), len(v.levels)) + } + for j, want := range x.levels { + tables := v.levels[j] + if len(want) != len(tables) { + t.Fatalf("#%d.%d: invalid tables count: want=%d got=%d", i, j, len(want), len(tables)) + } + got := make([]int64, len(tables)) + for k, t := range tables { + got[k] = t.fd.Num + } + if !reflect.DeepEqual(want, got) { + t.Fatalf("#%d.%d: invalid tables: want=%v got=%v", i, j, want, got) + } + } + } +} diff --git a/vendor/github.com/tinylib/msgp/.gitignore b/vendor/github.com/tinylib/msgp/.gitignore new file mode 100644 index 0000000000..78c501cf05 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/.gitignore @@ -0,0 +1,5 @@ +_generated/generated.go +_generated/generated_test.go +msgp/defgen_test.go +msgp/cover.out +*~ \ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/.travis.yml b/vendor/github.com/tinylib/msgp/.travis.yml new file mode 100644 index 0000000000..9b28626c5f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/.travis.yml @@ -0,0 +1,12 @@ +language: go + +go: + - 1.4 + - 1.5 + - tip + +env: + - GIMME_ARCH=amd64 + - GIMME_ARCH=386 + +script: "make travis" diff --git a/vendor/github.com/tinylib/msgp/Makefile b/vendor/github.com/tinylib/msgp/Makefile new file mode 100644 index 0000000000..81b8b126c3 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/Makefile @@ -0,0 +1,55 @@ + +# NOTE: This Makefile is only necessary if you +# plan on developing the msgp tool and library. +# Installation can still be performed with a +# normal `go install`. + +# generated integration test files +GGEN = ./_generated/generated.go ./_generated/generated_test.go +# generated unit test files +MGEN = ./msgp/defgen_test.go + +SHELL := /bin/bash + +BIN = $(GOBIN)/msgp + +.PHONY: clean wipe install get-deps bench all + +$(BIN): */*.go + @go install ./... + +install: $(BIN) + +$(GGEN): ./_generated/def.go + go generate ./_generated + +$(MGEN): ./msgp/defs_test.go + go generate ./msgp + +test: all + go test -v ./msgp + go test -v ./_generated + +bench: all + go test -bench . ./msgp + go test -bench . ./_generated + +clean: + $(RM) $(GGEN) $(MGEN) + +wipe: clean + $(RM) $(BIN) + +get-deps: + go get -d -t ./... + +all: install $(GGEN) $(MGEN) + +# travis CI enters here +travis: + go get -d -t ./... + go build -o "$${GOPATH%%:*}/bin/msgp" . 
+ go generate ./msgp + go generate ./_generated + go test ./msgp + go test ./_generated diff --git a/vendor/github.com/tinylib/msgp/README.md b/vendor/github.com/tinylib/msgp/README.md new file mode 100644 index 0000000000..a7cc849c8b --- /dev/null +++ b/vendor/github.com/tinylib/msgp/README.md @@ -0,0 +1,104 @@ +MessagePack Code Generator [![Build Status](https://travis-ci.org/tinylib/msgp.svg?branch=master)](https://travis-ci.org/tinylib/msgp) +======= + +[![forthebadge](http://forthebadge.com/badges/uses-badges.svg)](http://forthebadge.com) +[![forthebadge](http://forthebadge.com/badges/ages-12.svg)](http://forthebadge.com) + +This is a code generation tool and serialization library for [MessagePack](http://msgpack.org). It is targeted at the `go generate` [tool](http://tip.golang.org/cmd/go/#hdr-Generate_Go_files_by_processing_source). You can read more about MessagePack [in the wiki](http://github.com/tinylib/msgp/wiki), or at [msgpack.org](http://msgpack.org). + +### Why? + +- Use Go as your schema language +- Speeeeeed (400MB/s on modern hardware) +- [JSON interop](http://godoc.org/github.com/tinylib/msgp/msgp#CopyToJSON) +- [User-defined extensions](http://github.com/tinylib/msgp/wiki/Using-Extensions) +- Type safety +- Encoding flexibility + +### Quickstart + +Note: you need at least go 1.3 to compile this package, and at least go 1.4 to use `go generate`. + +In a source file, include the following directive: + +```go +//go:generate msgp +``` + +The `msgp` command will generate serialization methods for all exported type declarations in the file. + +You can [read more about the code generation options here](http://github.com/tinylib/msgp/wiki/Using-the-Code-Generator). + +### Use + +Field names can be set in much the same way as the `encoding/json` package. For example: + +```go +type Person struct { + Name string `msg:"name"` + Address string `msg:"address"` + Age int `msg:"age"` + Hidden string `msg:"-"` // this field is ignored + unexported bool // this field is also ignored +} +``` + +By default, the code generator will satisfy `msgp.Sizer`, `msgp.Encodable`, `msgp.Decodable`, +`msgp.Marshaler`, and `msgp.Unmarshaler`. Carefully-designed applications can use these methods to do +marshalling/unmarshalling with zero allocations. + +While `msgp.Marshaler` and `msgp.Unmarshaler` are quite similar to the standard library's +`json.Marshaler` and `json.Unmarshaler`, `msgp.Encodable` and `msgp.Decodable` are useful for +stream serialization. (`*msgp.Writer` and `*msgp.Reader` are essentially protocol-aware versions +of `*bufio.Writer` and `*bufio.Reader`, respectively.) 
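+
+For a quick sense of how these interfaces fit together, here is a minimal
+round-trip sketch. It assumes `msgp` has already been run on the file
+declaring the `Person` type above, so the generated methods exist; the
+variable names are illustrative only:
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/tinylib/msgp/msgp"
+)
+
+func main() {
+	p := Person{Name: "alice", Address: "wonderland", Age: 30}
+
+	// []byte-oriented path: MarshalMsg appends to its argument and
+	// returns the extended slice.
+	raw, err := p.MarshalMsg(nil)
+	if err != nil {
+		panic(err)
+	}
+	var q Person
+	if _, err := q.UnmarshalMsg(raw); err != nil {
+		panic(err)
+	}
+
+	// Stream-oriented path: EncodeMsg and DecodeMsg work against
+	// *msgp.Writer and *msgp.Reader.
+	var buf bytes.Buffer
+	w := msgp.NewWriter(&buf)
+	if err := p.EncodeMsg(w); err != nil {
+		panic(err)
+	}
+	w.Flush() // the writer is buffered; flush before reading back
+
+	var r Person
+	if err := r.DecodeMsg(msgp.NewReader(&buf)); err != nil {
+		panic(err)
+	}
+	fmt.Println(q.Name, r.Name)
+}
+```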
+
+### Features
+
+ - Extremely fast generated code
+ - Test and benchmark generation
+ - JSON interoperability (see `msgp.CopyToJSON()` and `msgp.UnmarshalAsJSON()`)
+ - Support for complex type declarations
+ - Native support for Go's `time.Time`, `complex64`, and `complex128` types
+ - Generation of both `[]byte`-oriented and `io.Reader/io.Writer`-oriented methods
+ - Support for arbitrary type system extensions
+ - [Preprocessor directives](http://github.com/tinylib/msgp/wiki/Preprocessor-Directives)
+
+Consider the following:
+```go
+const Eight = 8
+type MyInt int
+type Data []byte
+
+type Struct struct {
+	Which map[string]*MyInt `msg:"which"`
+	Other Data              `msg:"other"`
+	Nums  [Eight]float64    `msg:"nums"`
+}
+```
+As long as the declarations of `MyInt` and `Data` are in the same file as `Struct`, the parser will determine that the type information for `MyInt` and `Data` can be passed into the definition of `Struct` before its methods are generated.
+
+#### Extensions
+
+MessagePack supports defining your own types through "extensions," which are just a tuple of
+the data "type" (`int8`) and the raw binary. You [can see a worked example in the wiki.](http://github.com/tinylib/msgp/wiki/Using-Extensions)
+
+### Status
+
+Alpha. I _will_ break stuff. There is an open milestone for Beta stability (targeted for January.) Only the `/msgp` sub-directory will have a stability guarantee.
+
+You can read more about how `msgp` maps MessagePack types onto Go types [in the wiki](http://github.com/tinylib/msgp/wiki).
+
+Here are some of the known limitations/restrictions:
+
+ - Identifiers from outside the processed source file are assumed (optimistically) to satisfy the generator's interfaces. If this isn't the case, your code will fail to compile.
+ - Like most serializers, `chan` and `func` fields are ignored, as well as non-exported fields.
+ - Encoding of `interface{}` is limited to built-ins or types that have explicit encoding methods.
+ - _Maps must have `string` keys._ This is intentional (as it preserves JSON interop.) Although non-string map keys are not forbidden by the MessagePack standard, many serializers impose this restriction. (It also means *any* well-formed `struct` can be de-serialized into a `map[string]interface{}`.) The only exception to this rule is that the deserializers will allow you to read map keys encoded as `bin` types, due to the fact that some legacy encodings permitted this. (However, those values will still be cast to Go `string`s, and they will be converted to `str` types when re-encoded. It is the responsibility of the user to ensure that map keys are UTF-8 safe in this case.) The same rules hold true for JSON translation.
+
+If the output compiles, then there's a pretty good chance things are fine. (Plus, we generate tests for you.) *Please, please, please* file an issue if you think the generator is writing broken code.
+
+### Performance
+
+If you like benchmarks, see [here.](https://github.com/alecthomas/go_serialization_benchmarks)
+
+As one might expect, the generated methods that deal with `[]byte` are faster, but the `io.Reader/Writer` methods are generally more memory-efficient for large (> 2KB) objects.
diff --git a/vendor/github.com/tinylib/msgp/main.go b/vendor/github.com/tinylib/msgp/main.go
new file mode 100644
index 0000000000..4369d739a2
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/main.go
@@ -0,0 +1,119 @@
+// msgp is a code generation tool for
+// creating methods to serialize and de-serialize
+// Go data structures to and from MessagePack.
+//
+// This package is targeted at the `go generate` tool.
+// To use it, include the following directive in a
+// go source file with types requiring source generation:
+//
+// //go:generate msgp
+//
+// The go generate tool should set the proper environment variables for
+// the generator to execute without any command-line flags. However, the
+// following options are supported, if you need them:
+//
+// -o = output file name (default is {input}_gen.go)
+// -file = input file name (or directory; default is $GOFILE, which is set by the `go generate` command)
+// -io = satisfy the `msgp.Decodable` and `msgp.Encodable` interfaces (default is true)
+// -marshal = satisfy the `msgp.Marshaler` and `msgp.Unmarshaler` interfaces (default is true)
+// -tests = generate tests and benchmarks (default is true)
+//
+// For more information, please read README.md, and the wiki at github.com/tinylib/msgp
+//
+package main
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/tinylib/msgp/gen"
+	"github.com/tinylib/msgp/parse"
+	"github.com/tinylib/msgp/printer"
+	"github.com/ttacon/chalk"
+)
+
+var (
+	out        = flag.String("o", "", "output file")
+	file       = flag.String("file", "", "input file")
+	encode     = flag.Bool("io", true, "create Encode and Decode methods")
+	marshal    = flag.Bool("marshal", true, "create Marshal and Unmarshal methods")
+	tests      = flag.Bool("tests", true, "create tests and benchmarks")
+	unexported = flag.Bool("unexported", false, "also process unexported types")
+)
+
+func main() {
+	flag.Parse()
+
+	// GOFILE is set by go generate
+	if *file == "" {
+		*file = os.Getenv("GOFILE")
+		if *file == "" {
+			fmt.Println(chalk.Red.Color("No file to parse."))
+			os.Exit(1)
+		}
+	}
+
+	var mode gen.Method
+	if *encode {
+		mode |= (gen.Encode | gen.Decode | gen.Size)
+	}
+	if *marshal {
+		mode |= (gen.Marshal | gen.Unmarshal | gen.Size)
+	}
+	if *tests {
+		mode |= gen.Test
+	}
+
+	if mode&^gen.Test == 0 {
+		fmt.Println(chalk.Red.Color("No methods to generate; -io=false && -marshal=false"))
+		os.Exit(1)
+	}
+
+	if err := Run(*file, mode, *unexported); err != nil {
+		fmt.Println(chalk.Red.Color(err.Error()))
+		os.Exit(1)
+	}
+}
+
+// Run writes all methods using the associated file or path, e.g.
+//
+// err := msgp.Run("path/to/myfile.go", gen.Size|gen.Marshal|gen.Unmarshal|gen.Test, false)
+//
+func Run(gofile string, mode gen.Method, unexported bool) error {
+	if mode&^gen.Test == 0 {
+		return nil
+	}
+	fmt.Println(chalk.Magenta.Color("======== MessagePack Code Generator ======="))
+	fmt.Printf(chalk.Magenta.Color(">>> Input: \"%s\"\n"), gofile)
+	fs, err := parse.File(gofile, unexported)
+	if err != nil {
+		return err
+	}
+
+	if len(fs.Identities) == 0 {
+		fmt.Println(chalk.Magenta.Color("No types requiring code generation were found!"))
+		return nil
+	}
+
+	return printer.PrintFile(newFilename(gofile, fs.Package), fs, mode)
+}
+
+// picks a new file name based on input flags and input filename(s).
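+// For example, with -o unset, an input file "foo/bar.go" yields
+// "foo/bar_gen.go", while an input directory "foo" whose package is
+// named "bar" also yields "foo/bar_gen.go". (Illustrative paths only.)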
+func newFilename(old string, pkg string) string { + if *out != "" { + if pre := strings.TrimPrefix(*out, old); len(pre) > 0 && + !strings.HasSuffix(*out, ".go") { + return filepath.Join(old, *out) + } + return *out + } + + if fi, err := os.Stat(old); err == nil && fi.IsDir() { + old = filepath.Join(old, pkg) + } + // new file name is old file name + _gen.go + return strings.TrimSuffix(old, ".go") + "_gen.go" +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs_test.go b/vendor/github.com/tinylib/msgp/msgp/defs_test.go new file mode 100644 index 0000000000..667dfd6012 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs_test.go @@ -0,0 +1,12 @@ +package msgp_test + +//go:generate msgp -o=defgen_test.go -tests=false + +type Blobs []Blob + +type Blob struct { + Name string `msg:"name"` + Float float64 `msg:"float"` + Bytes []byte `msg:"bytes"` + Amount int64 `msg:"amount"` +} diff --git a/vendor/github.com/tinylib/msgp/msgp/edit_test.go b/vendor/github.com/tinylib/msgp/msgp/edit_test.go new file mode 100644 index 0000000000..e33b4e1b09 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit_test.go @@ -0,0 +1,200 @@ +package msgp + +import ( + "bytes" + "reflect" + "testing" +) + +func TestRemove(t *testing.T) { + var buf bytes.Buffer + w := NewWriter(&buf) + w.WriteMapHeader(3) + w.WriteString("first") + w.WriteFloat64(-3.1) + w.WriteString("second") + w.WriteString("DELETE ME!!!") + w.WriteString("third") + w.WriteBytes([]byte("blah")) + w.Flush() + + raw := Remove("second", buf.Bytes()) + + m, _, err := ReadMapStrIntfBytes(raw, nil) + if err != nil { + t.Fatal(err) + } + if len(m) != 2 { + t.Errorf("expected %d fields; found %d", 2, len(m)) + } + if _, ok := m["first"]; !ok { + t.Errorf("field %q not found", "first") + } + if _, ok := m["third"]; !ok { + t.Errorf("field %q not found", "third") + } + if _, ok := m["second"]; ok { + t.Errorf("field %q (deleted field) still present", "second") + } +} + +func TestLocate(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteMapHeader(2) + en.WriteString("thing_one") + en.WriteString("value_one") + en.WriteString("thing_two") + en.WriteFloat64(2.0) + en.Flush() + + field := Locate("thing_one", buf.Bytes()) + if len(field) == 0 { + t.Fatal("field not found") + } + + if !HasKey("thing_one", buf.Bytes()) { + t.Fatal("field not found") + } + + var zbuf bytes.Buffer + w := NewWriter(&zbuf) + w.WriteString("value_one") + w.Flush() + + if !bytes.Equal(zbuf.Bytes(), field) { + t.Errorf("got %q; wanted %q", field, zbuf.Bytes()) + } + + zbuf.Reset() + w.WriteFloat64(2.0) + w.Flush() + field = Locate("thing_two", buf.Bytes()) + if len(field) == 0 { + t.Fatal("field not found") + } + if !bytes.Equal(zbuf.Bytes(), field) { + t.Errorf("got %q; wanted %q", field, zbuf.Bytes()) + } + + field = Locate("nope", buf.Bytes()) + if len(field) != 0 { + t.Fatalf("wanted a zero-length returned slice") + } + +} + +func TestReplace(t *testing.T) { + // there are 4 cases that need coverage: + // - new value is smaller than old value + // - new value is the same size as the old value + // - new value is larger than old, but fits within cap(b) + // - new value is larger than old, and doesn't fit within cap(b) + + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteMapHeader(3) + en.WriteString("thing_one") + en.WriteString("value_one") + en.WriteString("thing_two") + en.WriteFloat64(2.0) + en.WriteString("some_bytes") + en.WriteBytes([]byte("here are some bytes")) + en.Flush() + + // same-size replacement + var fbuf bytes.Buffer + w := 
NewWriter(&fbuf)
+	w.WriteFloat64(4.0)
+	w.Flush()
+
+	// replace 2.0 with 4.0 in field two
+	raw := Replace("thing_two", buf.Bytes(), fbuf.Bytes())
+	if len(raw) == 0 {
+		t.Fatal("field not found")
+	}
+	var err error
+	m := make(map[string]interface{})
+	m, _, err = ReadMapStrIntfBytes(raw, m)
+	if err != nil {
+		t.Logf("%q", raw)
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(m["thing_two"], 4.0) {
+		t.Errorf("wanted %v; got %v", 4.0, m["thing_two"])
+	}
+
+	// smaller-size replacement
+	// replace 2.0 with []byte("hi!")
+	fbuf.Reset()
+	w.WriteBytes([]byte("hi!"))
+	w.Flush()
+	raw = Replace("thing_two", raw, fbuf.Bytes())
+	if len(raw) == 0 {
+		t.Fatal("field not found")
+	}
+
+	m, _, err = ReadMapStrIntfBytes(raw, m)
+	if err != nil {
+		t.Logf("%q", raw)
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(m["thing_two"], []byte("hi!")) {
+		t.Errorf("wanted %v; got %v", []byte("hi!"), m["thing_two"])
+	}
+
+	// larger-size replacement
+	fbuf.Reset()
+	w.WriteBytes([]byte("some even larger bytes than before"))
+	w.Flush()
+	raw = Replace("some_bytes", raw, fbuf.Bytes())
+	if len(raw) == 0 {
+		t.Logf("%q", raw)
+		t.Fatal("field not found")
+	}
+
+	m, _, err = ReadMapStrIntfBytes(raw, m)
+	if err != nil {
+		t.Logf("%q", raw)
+		t.Fatal(err)
+	}
+
+	if !reflect.DeepEqual(m["some_bytes"], []byte("some even larger bytes than before")) {
+		t.Errorf("wanted %v; got %v", []byte("some even larger bytes than before"), m["some_bytes"])
+	}
+
+	// identical in-place replacement
+	field := Locate("some_bytes", raw)
+	newraw := CopyReplace("some_bytes", raw, field)
+
+	if !bytes.Equal(newraw, raw) {
+		t.Logf("in: %q", raw)
+		t.Logf("out: %q", newraw)
+		t.Error("bytes not equal after copyreplace")
+	}
+}
+
+func BenchmarkLocate(b *testing.B) {
+	var buf bytes.Buffer
+	en := NewWriter(&buf)
+	en.WriteMapHeader(3)
+	en.WriteString("thing_one")
+	en.WriteString("value_one")
+	en.WriteString("thing_two")
+	en.WriteFloat64(2.0)
+	en.WriteString("thing_three")
+	en.WriteBytes([]byte("hello!"))
+	en.Flush()
+
+	raw := buf.Bytes()
+	// bytes/s will be the number of bytes traversed per unit of time
+	field := Locate("thing_three", raw)
+	b.SetBytes(int64(len(raw) - len(field)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Locate("thing_three", raw)
+	}
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/extension_test.go b/vendor/github.com/tinylib/msgp/msgp/extension_test.go
new file mode 100644
index 0000000000..d46fcfee3f
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/extension_test.go
@@ -0,0 +1,49 @@
+package msgp
+
+import (
+	"bytes"
+	"math/rand"
+	"testing"
+	"time"
+)
+
+var extSizes = [...]int{0, 1, 2, 4, 8, 16, int(tint8), int(tuint16), int(tuint32)}
+
+func randomExt() RawExtension {
+	e := RawExtension{}
+	e.Type = int8(rand.Int())
+	e.Data = RandBytes(extSizes[rand.Intn(len(extSizes))])
+	return e
+}
+
+func TestReadWriteExtension(t *testing.T) {
+	rand.Seed(time.Now().Unix())
+	var buf bytes.Buffer
+	en := NewWriter(&buf)
+	dc := NewReader(&buf)
+
+	for i := 0; i < 25; i++ {
+		buf.Reset()
+		e := randomExt()
+		en.WriteExtension(&e)
+		en.Flush()
+		err := dc.ReadExtension(&e)
+		if err != nil {
+			t.Errorf("error with extension (length %d): %s", len(buf.Bytes()), err)
+		}
+	}
+}
+
+func TestReadWriteExtensionBytes(t *testing.T) {
+	var bts []byte
+	rand.Seed(time.Now().Unix())
+
+	for i := 0; i < 24; i++ {
+		e := randomExt()
+		bts, _ = AppendExtension(bts[0:0], &e)
+		_, err := ReadExtensionBytes(bts, &e)
+		if err != nil {
+			t.Errorf("error with extension (length %d): %s", len(bts), err)
+		}
+	}
+}
diff --git 
a/vendor/github.com/tinylib/msgp/msgp/file_test.go b/vendor/github.com/tinylib/msgp/msgp/file_test.go new file mode 100644 index 0000000000..1cc01cec1c --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_test.go @@ -0,0 +1,103 @@ +// +build linux darwin dragonfly freebsd netbsd openbsd + +package msgp_test + +import ( + "bytes" + "crypto/rand" + "github.com/tinylib/msgp/msgp" + prand "math/rand" + "os" + "testing" +) + +type rawBytes []byte + +func (r rawBytes) MarshalMsg(b []byte) ([]byte, error) { + return msgp.AppendBytes(b, []byte(r)), nil +} + +func (r rawBytes) Msgsize() int { + return msgp.BytesPrefixSize + len(r) +} + +func (r *rawBytes) UnmarshalMsg(b []byte) ([]byte, error) { + tmp, out, err := msgp.ReadBytesBytes(b, (*(*[]byte)(r))[:0]) + *r = rawBytes(tmp) + return out, err +} + +func TestReadWriteFile(t *testing.T) { + t.Parallel() + + f, err := os.Create("tmpfile") + if err != nil { + t.Fatal(err) + } + defer func() { + f.Close() + os.Remove("tmpfile") + }() + + data := make([]byte, 1024*1024) + rand.Read(data) + + err = msgp.WriteFile(rawBytes(data), f) + if err != nil { + t.Fatal(err) + } + + var out rawBytes + f.Seek(0, os.SEEK_SET) + err = msgp.ReadFile(&out, f) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal([]byte(out), []byte(data)) { + t.Fatal("Input and output not equal.") + } +} + +var blobstrings = []string{"", "a string", "a longer string here!"} +var blobfloats = []float64{0.0, -1.0, 1.0, 3.1415926535} +var blobints = []int64{0, 1, -1, 80000, 1 << 30} +var blobbytes = [][]byte{[]byte{}, []byte("hello"), []byte("{\"is_json\":true,\"is_compact\":\"unable to determine\"}")} + +func BenchmarkWriteReadFile(b *testing.B) { + + // let's not run out of disk space... + if b.N > 10000000 { + b.N = 10000000 + } + + fname := "bench-tmpfile" + f, err := os.Create(fname) + if err != nil { + b.Fatal(err) + } + defer func(f *os.File, name string) { + f.Close() + os.Remove(name) + }(f, fname) + + data := make(Blobs, b.N) + + for i := range data { + data[i].Name = blobstrings[prand.Intn(len(blobstrings))] + data[i].Float = blobfloats[prand.Intn(len(blobfloats))] + data[i].Amount = blobints[prand.Intn(len(blobints))] + data[i].Bytes = blobbytes[prand.Intn(len(blobbytes))] + } + + b.SetBytes(int64(data.Msgsize() / b.N)) + b.ResetTimer() + err = msgp.WriteFile(data, f) + if err != nil { + b.Fatal(err) + } + err = msgp.ReadFile(&data, f) + if err != nil { + b.Fatal(err) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/floatbench_test.go b/vendor/github.com/tinylib/msgp/msgp/floatbench_test.go new file mode 100644 index 0000000000..575b081bb5 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/floatbench_test.go @@ -0,0 +1,25 @@ +package msgp + +import ( + "testing" +) + +func BenchmarkReadWriteFloat32(b *testing.B) { + var f float32 = 3.9081 + bts := AppendFloat32([]byte{}, f) + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = AppendFloat32(bts[0:0], f) + f, bts, _ = ReadFloat32Bytes(bts) + } +} + +func BenchmarkReadWriteFloat64(b *testing.B) { + var f float64 = 3.9081 + bts := AppendFloat64([]byte{}, f) + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts = AppendFloat64(bts[0:0], f) + f, bts, _ = ReadFloat64Bytes(bts) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go new file mode 100644 index 0000000000..726974ab71 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes_test.go @@ -0,0 +1,121 @@ +package msgp + +import ( + "bytes" + "encoding/json" + "testing" 
+ "time" +) + +func TestUnmarshalJSON(t *testing.T) { + var buf bytes.Buffer + enc := NewWriter(&buf) + enc.WriteMapHeader(5) + + enc.WriteString("thing_1") + enc.WriteString("a string object") + + enc.WriteString("a_map") + enc.WriteMapHeader(2) + + // INNER + enc.WriteString("cmplx") + enc.WriteComplex64(complex(1.0, 1.0)) + enc.WriteString("int_b") + enc.WriteInt64(-100) + + enc.WriteString("an extension") + enc.WriteExtension(&RawExtension{Type: 1, Data: []byte("blaaahhh")}) + + enc.WriteString("some bytes") + enc.WriteBytes([]byte("here are some bytes")) + + enc.WriteString("now") + enc.WriteTime(time.Now()) + + enc.Flush() + + var js bytes.Buffer + _, err := UnmarshalAsJSON(&js, buf.Bytes()) + if err != nil { + t.Logf("%s", js.Bytes()) + t.Fatal(err) + } + mp := make(map[string]interface{}) + err = json.Unmarshal(js.Bytes(), &mp) + if err != nil { + t.Log(js.String()) + t.Fatalf("Error unmarshaling: %s", err) + } + + if len(mp) != 5 { + t.Errorf("map length should be %d, not %d", 5, len(mp)) + } + + so, ok := mp["thing_1"] + if !ok || so != "a string object" { + t.Errorf("expected %q; got %q", "a string object", so) + } + + if _, ok := mp["now"]; !ok { + t.Error(`"now" field doesn't exist`) + } + + c, ok := mp["a_map"] + if !ok { + t.Error(`"a_map" field doesn't exist`) + } else { + if m, ok := c.(map[string]interface{}); ok { + if _, ok := m["cmplx"]; !ok { + t.Error(`"a_map.cmplx" doesn't exist`) + } + } else { + t.Error(`can't type-assert "c" to map[string]interface{}`) + } + + } + + t.Logf("JSON: %s", js.Bytes()) +} + +func BenchmarkUnmarshalAsJSON(b *testing.B) { + var buf bytes.Buffer + enc := NewWriter(&buf) + enc.WriteMapHeader(4) + + enc.WriteString("thing_1") + enc.WriteString("a string object") + + enc.WriteString("a_first_map") + enc.WriteMapHeader(2) + enc.WriteString("float_a") + enc.WriteFloat32(1.0) + enc.WriteString("int_b") + enc.WriteInt64(-100) + + enc.WriteString("an array") + enc.WriteArrayHeader(2) + enc.WriteBool(true) + enc.WriteUint(2089) + + enc.WriteString("a_second_map") + enc.WriteMapStrStr(map[string]string{ + "internal_one": "blah", + "internal_two": "blahhh...", + }) + enc.Flush() + + var js bytes.Buffer + bts := buf.Bytes() + _, err := UnmarshalAsJSON(&js, bts) + if err != nil { + b.Fatal(err) + } + b.SetBytes(int64(len(js.Bytes()))) + b.ResetTimer() + b.ReportAllocs() + for i := 0; i < b.N; i++ { + js.Reset() + UnmarshalAsJSON(&js, bts) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_test.go b/vendor/github.com/tinylib/msgp/msgp/json_test.go new file mode 100644 index 0000000000..439d479009 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_test.go @@ -0,0 +1,142 @@ +package msgp + +import ( + "bytes" + "encoding/json" + "reflect" + "testing" +) + +func TestCopyJSON(t *testing.T) { + var buf bytes.Buffer + enc := NewWriter(&buf) + enc.WriteMapHeader(5) + + enc.WriteString("thing_1") + enc.WriteString("a string object") + + enc.WriteString("a_map") + enc.WriteMapHeader(2) + enc.WriteString("float_a") + enc.WriteFloat32(1.0) + enc.WriteString("int_b") + enc.WriteInt64(-100) + + enc.WriteString("some bytes") + enc.WriteBytes([]byte("here are some bytes")) + enc.WriteString("a bool") + enc.WriteBool(true) + + enc.WriteString("a map") + enc.WriteMapStrStr(map[string]string{ + "internal_one": "blah", + "internal_two": "blahhh...", + }) + enc.Flush() + + var js bytes.Buffer + _, err := CopyToJSON(&js, &buf) + if err != nil { + t.Fatal(err) + } + mp := make(map[string]interface{}) + err = json.Unmarshal(js.Bytes(), &mp) + if err != 
nil {
+		t.Log(js.String())
+		t.Fatalf("Error unmarshaling: %s", err)
+	}
+
+	if len(mp) != 5 {
+		t.Errorf("map length should be %d, not %d", 5, len(mp))
+	}
+
+	so, ok := mp["thing_1"]
+	if !ok || so != "a string object" {
+		t.Errorf("expected %q; got %q", "a string object", so)
+	}
+
+	in, ok := mp["a map"]
+	if !ok {
+		t.Error("no key 'a map'")
+	}
+	if inm, ok := in.(map[string]interface{}); !ok {
+		t.Error("inner map not type-assertable to map[string]interface{}")
+	} else {
+		inm1, ok := inm["internal_one"]
+		if !ok || !reflect.DeepEqual(inm1, "blah") {
+			t.Errorf("inner map field %q should be %q, not %q", "internal_one", "blah", inm1)
+		}
+	}
+}
+
+func BenchmarkCopyToJSON(b *testing.B) {
+	var buf bytes.Buffer
+	enc := NewWriter(&buf)
+	enc.WriteMapHeader(4)
+
+	enc.WriteString("thing_1")
+	enc.WriteString("a string object")
+
+	enc.WriteString("a_first_map")
+	enc.WriteMapHeader(2)
+	enc.WriteString("float_a")
+	enc.WriteFloat32(1.0)
+	enc.WriteString("int_b")
+	enc.WriteInt64(-100)
+
+	enc.WriteString("an array")
+	enc.WriteArrayHeader(2)
+	enc.WriteBool(true)
+	enc.WriteUint(2089)
+
+	enc.WriteString("a_second_map")
+	enc.WriteMapStrStr(map[string]string{
+		"internal_one": "blah",
+		"internal_two": "blahhh...",
+	})
+	enc.Flush()
+
+	var js bytes.Buffer
+	bts := buf.Bytes()
+	_, err := CopyToJSON(&js, &buf)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.SetBytes(int64(len(js.Bytes())))
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		js.Reset()
+		CopyToJSON(&js, bytes.NewReader(bts))
+	}
+}
+
+func BenchmarkStdlibJSON(b *testing.B) {
+	obj := map[string]interface{}{
+		"thing_1": "a string object",
+		"a_first_map": map[string]interface{}{
+			"float_a": float32(1.0),
+			"float_b": -100,
+		},
+		"an array": []interface{}{
+			"part_A",
+			"part_B",
+		},
+		"a_second_map": map[string]interface{}{
+			"internal_one": "blah",
+			"internal_two": "blahhh...",
+		},
+	}
+	var js bytes.Buffer
+	err := json.NewEncoder(&js).Encode(&obj)
+	if err != nil {
+		b.Fatal(err)
+	}
+	b.SetBytes(int64(len(js.Bytes())))
+	b.ResetTimer()
+	b.ReportAllocs()
+	for i := 0; i < b.N; i++ {
+		js.Reset()
+		json.NewEncoder(&js).Encode(&obj)
+	}
+}
diff --git a/vendor/github.com/tinylib/msgp/msgp/number_test.go b/vendor/github.com/tinylib/msgp/msgp/number_test.go
new file mode 100644
index 0000000000..3490647c3c
--- /dev/null
+++ b/vendor/github.com/tinylib/msgp/msgp/number_test.go
@@ -0,0 +1,94 @@
+package msgp
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestNumber(t *testing.T) {
+
+	n := Number{}
+
+	if n.Type() != IntType {
+		t.Errorf("expected zero-value type to be %s; got %s", IntType, n.Type())
+	}
+
+	if n.String() != "0" {
+		t.Errorf("expected Number{}.String() to be \"0\" but got %q", n.String())
+	}
+
+	n.AsInt(248)
+	i, ok := n.Int()
+	if !ok || i != 248 || n.Type() != IntType || n.String() != "248" {
+		t.Errorf("%d in; %d out!", 248, i)
+	}
+
+	n.AsFloat64(3.141)
+	f, ok := n.Float()
+	if !ok || f != 3.141 || n.Type() != Float64Type || n.String() != "3.141" {
+		t.Errorf("%f in; %f out!", 3.141, f)
+	}
+
+	n.AsUint(40000)
+	u, ok := n.Uint()
+	if !ok || u != 40000 || n.Type() != UintType || n.String() != "40000" {
+		t.Errorf("%d in; %d out!", 40000, u)
+	}
+
+	nums := []interface{}{
+		float64(3.14159),
+		int64(-29081),
+		uint64(90821983),
+		float32(3.141),
+	}
+
+	var dat []byte
+	var buf bytes.Buffer
+	wr := NewWriter(&buf)
+	for _, n := range nums {
+		dat, _ = AppendIntf(dat, n)
+		wr.WriteIntf(n)
+	}
+	wr.Flush()
+
+	mout := make([]Number, len(nums))
+	dout := make([]Number, len(nums))
+
+	rd := 
NewReader(&buf) + unm := dat + for i := range nums { + var err error + unm, err = mout[i].UnmarshalMsg(unm) + if err != nil { + t.Fatal("unmarshal error:", err) + } + err = dout[i].DecodeMsg(rd) + if err != nil { + t.Fatal("decode error:", err) + } + if mout[i] != dout[i] { + t.Errorf("for %#v, got %#v from unmarshal and %#v from decode", nums[i], mout[i], dout[i]) + } + } + + buf.Reset() + var odat []byte + for i := range nums { + var err error + odat, err = mout[i].MarshalMsg(odat) + if err != nil { + t.Fatal("marshal error:", err) + } + err = dout[i].EncodeMsg(wr) + } + wr.Flush() + + if !bytes.Equal(dat, odat) { + t.Errorf("marshal: expected output %#v; got %#v", dat, odat) + } + + if !bytes.Equal(dat, buf.Bytes()) { + t.Errorf("encode: expected output %#v; got %#v", dat, buf.Bytes()) + } + +} diff --git a/vendor/github.com/tinylib/msgp/msgp/raw_test.go b/vendor/github.com/tinylib/msgp/msgp/raw_test.go new file mode 100644 index 0000000000..9f3321f449 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/raw_test.go @@ -0,0 +1,85 @@ +package msgp + +import ( + "bytes" + "testing" + "time" +) + +// all standard interfaces +type allifaces interface { + Encodable + Decodable + Marshaler + Unmarshaler + Sizer +} + +func TestRaw(t *testing.T) { + bts := make([]byte, 0, 512) + bts = AppendMapHeader(bts, 3) + bts = AppendString(bts, "key_one") + bts = AppendFloat64(bts, -1.0) + bts = AppendString(bts, "key_two") + bts = AppendString(bts, "value_two") + bts = AppendString(bts, "key_three") + bts = AppendTime(bts, time.Now()) + + var r Raw + + // verify that Raw satisfies + // the interfaces we want it to + var _ allifaces = &r + + // READ TESTS + + extra, err := r.UnmarshalMsg(bts) + if err != nil { + t.Fatal("error from UnmarshalMsg:", err) + } + if len(extra) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(extra)) + } + if !bytes.Equal([]byte(r), bts) { + t.Fatal("value of raw and input slice are not equal after UnmarshalMsg") + } + + r = r[:0] + + var buf bytes.Buffer + buf.Write(bts) + + rd := NewReader(&buf) + + err = r.DecodeMsg(rd) + if err != nil { + t.Fatal("error from DecodeMsg:", err) + } + + if !bytes.Equal([]byte(r), bts) { + t.Fatal("value of raw and input slice are not equal after DecodeMsg") + } + + // WRITE TESTS + + buf.Reset() + wr := NewWriter(&buf) + err = r.EncodeMsg(wr) + if err != nil { + t.Fatal("error from EncodeMsg:", err) + } + + wr.Flush() + if !bytes.Equal(buf.Bytes(), bts) { + t.Fatal("value of buf.Bytes() and input slice are not equal after EncodeMsg") + } + + var outsl []byte + outsl, err = r.MarshalMsg(outsl) + if err != nil { + t.Fatal("error from MarshalMsg:", err) + } + if !bytes.Equal(outsl, bts) { + t.Fatal("value of output and input of MarshalMsg are not equal.") + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go new file mode 100644 index 0000000000..0049471ba1 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes_test.go @@ -0,0 +1,518 @@ +package msgp + +import ( + "bytes" + "reflect" + "testing" + "time" +) + +func TestReadMapHeaderBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []uint32{0, 1, 5, 49082} + + for i, v := range tests { + buf.Reset() + en.WriteMapHeader(v) + en.Flush() + + out, left, err := ReadMapHeaderBytes(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + + if out != v { + t.Errorf("%d in; 
%d out", v, out) + } + } +} + +func BenchmarkReadMapHeaderBytes(b *testing.B) { + sizes := []uint32{1, 100, tuint16, tuint32} + buf := make([]byte, 0, 5*len(sizes)) + for _, sz := range sizes { + buf = AppendMapHeader(buf, sz) + } + b.SetBytes(int64(len(buf) / len(sizes))) + b.ReportAllocs() + b.ResetTimer() + o := buf + for i := 0; i < b.N; i++ { + _, buf, _ = ReadMapHeaderBytes(buf) + if len(buf) == 0 { + buf = o + } + } +} + +func TestReadArrayHeaderBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []uint32{0, 1, 5, 49082} + + for i, v := range tests { + buf.Reset() + en.WriteArrayHeader(v) + en.Flush() + + out, left, err := ReadArrayHeaderBytes(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + + if out != v { + t.Errorf("%d in; %d out", v, out) + } + } +} + +func BenchmarkReadArrayHeaderBytes(b *testing.B) { + sizes := []uint32{1, 100, tuint16, tuint32} + buf := make([]byte, 0, 5*len(sizes)) + for _, sz := range sizes { + buf = AppendArrayHeader(buf, sz) + } + b.SetBytes(int64(len(buf) / len(sizes))) + b.ReportAllocs() + b.ResetTimer() + o := buf + for i := 0; i < b.N; i++ { + _, buf, _ = ReadArrayHeaderBytes(buf) + if len(buf) == 0 { + buf = o + } + } +} + +func TestReadNilBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteNil() + en.Flush() + + left, err := ReadNilBytes(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } +} + +func BenchmarkReadNilByte(b *testing.B) { + buf := []byte{mnil} + b.SetBytes(1) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ReadNilBytes(buf) + } +} + +func TestReadFloat64Bytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteFloat64(3.14159) + en.Flush() + + out, left, err := ReadFloat64Bytes(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if out != 3.14159 { + t.Errorf("%f in; %f out", 3.14159, out) + } +} + +func BenchmarkReadFloat64Bytes(b *testing.B) { + f := float64(3.14159) + buf := make([]byte, 0, 9) + buf = AppendFloat64(buf, f) + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ReadFloat64Bytes(buf) + } +} + +func TestReadFloat32Bytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteFloat32(3.1) + en.Flush() + + out, left, err := ReadFloat32Bytes(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if out != 3.1 { + t.Errorf("%f in; %f out", 3.1, out) + } +} + +func BenchmarkReadFloat32Bytes(b *testing.B) { + f := float32(3.14159) + buf := make([]byte, 0, 5) + buf = AppendFloat32(buf, f) + b.SetBytes(int64(len(buf))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ReadFloat32Bytes(buf) + } +} + +func TestReadBoolBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []bool{true, false} + + for i, v := range tests { + buf.Reset() + en.WriteBool(v) + en.Flush() + out, left, err := ReadBoolBytes(buf.Bytes()) + + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + + if out != v { + t.Errorf("%t in; %t out", v, out) + } + } +} + +func BenchmarkReadBoolBytes(b *testing.B) { + buf := 
[]byte{mtrue, mfalse, mtrue, mfalse} + b.SetBytes(1) + b.ReportAllocs() + b.ResetTimer() + o := buf + for i := 0; i < b.N; i++ { + _, buf, _ = ReadBoolBytes(buf) + if len(buf) == 0 { + buf = o + } + } +} + +func TestReadInt64Bytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []int64{-5, -30, 0, 1, 127, 300, 40921, 34908219} + + for i, v := range tests { + buf.Reset() + en.WriteInt64(v) + en.Flush() + out, left, err := ReadInt64Bytes(buf.Bytes()) + + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + + if out != v { + t.Errorf("%d in; %d out", v, out) + } + } +} + +func TestReadUint64Bytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []uint64{0, 1, 127, 300, 40921, 34908219} + + for i, v := range tests { + buf.Reset() + en.WriteUint64(v) + en.Flush() + out, left, err := ReadUint64Bytes(buf.Bytes()) + + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + + if out != v { + t.Errorf("%d in; %d out", v, out) + } + } +} + +func TestReadBytesBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")} + var scratch []byte + + for i, v := range tests { + buf.Reset() + en.WriteBytes(v) + en.Flush() + out, left, err := ReadBytesBytes(buf.Bytes(), scratch) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if !bytes.Equal(out, v) { + t.Errorf("%q in; %q out", v, out) + } + } +} + +func TestReadZCBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := [][]byte{[]byte{}, []byte("some bytes"), []byte("some more bytes")} + + for i, v := range tests { + buf.Reset() + en.WriteBytes(v) + en.Flush() + out, left, err := ReadBytesZC(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if !bytes.Equal(out, v) { + t.Errorf("%q in; %q out", v, out) + } + } +} + +func TestReadZCString(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []string{"", "hello", "here's another string......"} + + for i, v := range tests { + buf.Reset() + en.WriteString(v) + en.Flush() + + out, left, err := ReadStringZC(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if string(out) != v { + t.Errorf("%q in; %q out", v, out) + } + } +} + +func TestReadStringBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []string{"", "hello", "here's another string......"} + + for i, v := range tests { + buf.Reset() + en.WriteString(v) + en.Flush() + + out, left, err := ReadStringBytes(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if out != v { + t.Errorf("%q in; %q out", v, out) + } + } +} + +func TestReadComplex128Bytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []complex128{complex(0, 0), complex(12.8, 32.0)} + + for i, v := range tests { + buf.Reset() + en.WriteComplex128(v) + en.Flush() + + out, left, err := ReadComplex128Bytes(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: 
%s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if out != v { + t.Errorf("%f in; %f out", v, out) + } + } +} + +func TestReadComplex64Bytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := []complex64{complex(0, 0), complex(12.8, 32.0)} + + for i, v := range tests { + buf.Reset() + en.WriteComplex64(v) + en.Flush() + + out, left, err := ReadComplex64Bytes(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if out != v { + t.Errorf("%f in; %f out", v, out) + } + } +} + +func TestReadTimeBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + now := time.Now() + en.WriteTime(now) + en.Flush() + out, left, err := ReadTimeBytes(buf.Bytes()) + if err != nil { + t.Fatal(err) + } + + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if !now.Equal(out) { + t.Errorf("%s in; %s out", now, out) + } +} + +func BenchmarkReadTimeBytes(b *testing.B) { + data := AppendTime(nil, time.Now()) + b.SetBytes(15) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + ReadTimeBytes(data) + } +} + +func TestReadIntfBytes(t *testing.T) { + var buf bytes.Buffer + en := NewWriter(&buf) + + tests := make([]interface{}, 0, 10) + tests = append(tests, float64(3.5)) + tests = append(tests, int64(-49082)) + tests = append(tests, uint64(34908)) + tests = append(tests, string("hello!")) + tests = append(tests, []byte("blah.")) + tests = append(tests, map[string]interface{}{ + "key_one": 3.5, + "key_two": "hi.", + }) + + for i, v := range tests { + buf.Reset() + if err := en.WriteIntf(v); err != nil { + t.Fatal(err) + } + en.Flush() + + out, left, err := ReadIntfBytes(buf.Bytes()) + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if len(left) != 0 { + t.Errorf("expected 0 bytes left; found %d", len(left)) + } + if !reflect.DeepEqual(v, out) { + t.Errorf("ReadIntf(): %v in; %v out", v, out) + } + } + +} + +func BenchmarkSkipBytes(b *testing.B) { + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteMapHeader(6) + + en.WriteString("thing_one") + en.WriteString("value_one") + + en.WriteString("thing_two") + en.WriteFloat64(3.14159) + + en.WriteString("some_bytes") + en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd")) + + en.WriteString("the_time") + en.WriteTime(time.Now()) + + en.WriteString("what?") + en.WriteBool(true) + + en.WriteString("ext") + en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")}) + en.Flush() + + bts := buf.Bytes() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := Skip(bts) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_test.go b/vendor/github.com/tinylib/msgp/msgp/read_test.go new file mode 100644 index 0000000000..aa19143969 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_test.go @@ -0,0 +1,724 @@ +package msgp + +import ( + "bytes" + "io" + "math" + "math/rand" + "reflect" + "testing" + "time" +) + +func TestSanity(t *testing.T) { + if !isfixint(0) { + t.Fatal("WUT.") + } +} + +func TestReadIntf(t *testing.T) { + // NOTE: if you include cases + // with, say, int32s, the test + // will fail, b/c integers are + // always read out as int64, and + // unsigned integers as uint64 + + var testCases = []interface{}{ + float64(128.032), + float32(9082.092), + int64(-40), + uint64(9082981), + 
time.Now(), + "hello!", + []byte("hello!"), + map[string]interface{}{ + "thing-1": "thing-1-value", + "thing-2": int64(800), + "thing-3": []byte("some inner bytes..."), + "thing-4": false, + }, + } + + var buf bytes.Buffer + var v interface{} + dec := NewReader(&buf) + enc := NewWriter(&buf) + + for i, ts := range testCases { + buf.Reset() + err := enc.WriteIntf(ts) + if err != nil { + t.Errorf("Test case %d: %s", i, err) + continue + } + err = enc.Flush() + if err != nil { + t.Fatal(err) + } + v, err = dec.ReadIntf() + if err != nil { + t.Errorf("Test case: %d: %s", i, err) + } + if !reflect.DeepEqual(v, ts) { + t.Errorf("%v in; %v out", ts, v) + } + } + +} + +func TestReadMapHeader(t *testing.T) { + tests := []struct { + Sz uint32 + }{ + {0}, + {1}, + {tuint16}, + {tuint32}, + } + + var buf bytes.Buffer + var sz uint32 + var err error + wr := NewWriter(&buf) + rd := NewReader(&buf) + for i, test := range tests { + buf.Reset() + err = wr.WriteMapHeader(test.Sz) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + sz, err = rd.ReadMapHeader() + if err != nil { + t.Errorf("Test case %d: got error %s", i, err) + } + if sz != test.Sz { + t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz) + } + } +} + +func BenchmarkReadMapHeader(b *testing.B) { + sizes := []uint32{0, 1, tuint16, tuint32} + data := make([]byte, 0, len(sizes)*5) + for _, d := range sizes { + data = AppendMapHeader(data, d) + } + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data) / len(sizes))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + rd.ReadMapHeader() + } +} + +func TestReadArrayHeader(t *testing.T) { + tests := []struct { + Sz uint32 + }{ + {0}, + {1}, + {tuint16}, + {tuint32}, + } + + var buf bytes.Buffer + var sz uint32 + var err error + wr := NewWriter(&buf) + rd := NewReader(&buf) + for i, test := range tests { + buf.Reset() + err = wr.WriteArrayHeader(test.Sz) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + sz, err = rd.ReadArrayHeader() + if err != nil { + t.Errorf("Test case %d: got error %s", i, err) + } + if sz != test.Sz { + t.Errorf("Test case %d: wrote size %d; got size %d", i, test.Sz, sz) + } + } +} + +func BenchmarkReadArrayHeader(b *testing.B) { + sizes := []uint32{0, 1, tuint16, tuint32} + data := make([]byte, 0, len(sizes)*5) + for _, d := range sizes { + data = AppendArrayHeader(data, d) + } + rd := NewReader(NewEndlessReader(data, b)) + b.ReportAllocs() + b.SetBytes(int64(len(data) / len(sizes))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + rd.ReadArrayHeader() + } +} + +func TestReadNil(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + rd := NewReader(&buf) + + wr.WriteNil() + wr.Flush() + err := rd.ReadNil() + if err != nil { + t.Fatal(err) + } +} + +func BenchmarkReadNil(b *testing.B) { + data := AppendNil(nil) + rd := NewReader(NewEndlessReader(data, b)) + b.ReportAllocs() + b.SetBytes(1) + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := rd.ReadNil() + if err != nil { + b.Fatal(err) + } + } +} + +func TestReadFloat64(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + rd := NewReader(&buf) + + for i := 0; i < 100; i++ { + buf.Reset() + + flt := (rand.Float64() - 0.5) * math.MaxFloat64 + err := wr.WriteFloat64(flt) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + out, err := rd.ReadFloat64() + if err != nil { + t.Errorf("Error reading %f: %s", flt, err) + continue + } + + 
if out != flt {
+			t.Errorf("Put in %f but got out %f", flt, out)
+		}
+	}
+}
+
+func BenchmarkReadFloat64(b *testing.B) {
+	fs := []float64{rand.Float64(), rand.Float64(), rand.Float64(), rand.Float64()}
+	data := make([]byte, 0, 9*len(fs))
+	for _, f := range fs {
+		data = AppendFloat64(data, f)
+	}
+	rd := NewReader(NewEndlessReader(data, b))
+	b.SetBytes(9)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := rd.ReadFloat64()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestReadFloat32(t *testing.T) {
+	var buf bytes.Buffer
+	wr := NewWriter(&buf)
+	rd := NewReader(&buf)
+
+	for i := 0; i < 10000; i++ {
+		buf.Reset()
+
+		flt := (rand.Float32() - 0.5) * math.MaxFloat32
+		err := wr.WriteFloat32(flt)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = wr.Flush()
+		if err != nil {
+			t.Fatal(err)
+		}
+		out, err := rd.ReadFloat32()
+		if err != nil {
+			t.Errorf("Error reading %f: %s", flt, err)
+			continue
+		}
+
+		if out != flt {
+			t.Errorf("Put in %f but got out %f", flt, out)
+		}
+	}
+}
+
+func BenchmarkReadFloat32(b *testing.B) {
+	fs := []float32{rand.Float32(), rand.Float32(), rand.Float32(), rand.Float32()}
+	data := make([]byte, 0, 5*len(fs))
+	for _, f := range fs {
+		data = AppendFloat32(data, f)
+	}
+	rd := NewReader(NewEndlessReader(data, b))
+	b.SetBytes(5)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := rd.ReadFloat32()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestReadInt64(t *testing.T) {
+	var buf bytes.Buffer
+	wr := NewWriter(&buf)
+	rd := NewReader(&buf)
+
+	ints := []int64{-100000, -5000, -5, 0, 8, 240, int64(tuint16), int64(tuint32), int64(tuint64)}
+
+	for i, num := range ints {
+		buf.Reset()
+
+		err := wr.WriteInt64(num)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = wr.Flush()
+		if err != nil {
+			t.Fatal(err)
+		}
+		out, err := rd.ReadInt64()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if out != num {
+			t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
+		}
+	}
+}
+
+func BenchmarkReadInt64(b *testing.B) {
+	is := []int64{0, 1, 65000, rand.Int63()}
+	data := make([]byte, 0, 9*len(is))
+	for _, n := range is {
+		data = AppendInt64(data, n)
+	}
+	rd := NewReader(NewEndlessReader(data, b))
+	b.SetBytes(int64(len(data) / len(is)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := rd.ReadInt64()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestReadUint64(t *testing.T) {
+	var buf bytes.Buffer
+	wr := NewWriter(&buf)
+	rd := NewReader(&buf)
+
+	ints := []uint64{0, 8, 240, uint64(tuint16), uint64(tuint32), uint64(tuint64)}
+
+	for i, num := range ints {
+		buf.Reset()
+
+		err := wr.WriteUint64(num)
+		if err != nil {
+			t.Fatal(err)
+		}
+		err = wr.Flush()
+		if err != nil {
+			t.Fatal(err)
+		}
+		out, err := rd.ReadUint64()
+		if err != nil {
+			t.Fatal(err)
+		}
+		if out != num {
+			t.Errorf("Test case %d: put %d in and got %d out", i, num, out)
+		}
+	}
+}
+
+func BenchmarkReadUint64(b *testing.B) {
+	us := []uint64{0, 1, 10000, uint64(rand.Uint32() * 4)}
+	data := make([]byte, 0, 9*len(us))
+	for _, n := range us {
+		data = AppendUint64(data, n)
+	}
+	rd := NewReader(NewEndlessReader(data, b))
+	b.SetBytes(int64(len(data) / len(us)))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, err := rd.ReadUint64()
+		if err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+
+func TestReadBytes(t *testing.T) {
+	var buf bytes.Buffer
+	wr := NewWriter(&buf)
+	rd := NewReader(&buf)
+
+	sizes := []int{0, 1, 225, int(tuint32)}
+	var scratch []byte
+	for i, size := range sizes {
+		buf.Reset()
+		bts := RandBytes(size)
+
+		err := 
wr.WriteBytes(bts) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + out, err := rd.ReadBytes(scratch) + if err != nil { + t.Errorf("test case %d: %s", i, err) + continue + } + + if !bytes.Equal(bts, out) { + t.Errorf("test case %d: Bytes not equal.", i) + } + + } +} + +func benchBytes(size uint32, b *testing.B) { + data := make([]byte, 0, size+5) + data = AppendBytes(data, RandBytes(int(size))) + + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + var scratch []byte + var err error + for i := 0; i < b.N; i++ { + scratch, err = rd.ReadBytes(scratch) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRead16Bytes(b *testing.B) { + benchBytes(16, b) +} + +func BenchmarkRead256Bytes(b *testing.B) { + benchBytes(256, b) +} + +// This particular case creates +// an object larger than the default +// read buffer size, so it's a decent +// indicator of worst-case performance. +func BenchmarkRead2048Bytes(b *testing.B) { + benchBytes(2048, b) +} + +func TestReadString(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + rd := NewReader(&buf) + + sizes := []int{0, 1, 225, int(math.MaxUint16 + 5)} + for i, size := range sizes { + buf.Reset() + in := string(RandBytes(size)) + + err := wr.WriteString(in) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + out, err := rd.ReadString() + if err != nil { + t.Errorf("test case %d: %s", i, err) + } + if out != in { + t.Errorf("test case %d: strings not equal.", i) + t.Errorf("string (len = %d) in; string (len = %d) out", size, len(out)) + } + + } +} + +func benchString(size uint32, b *testing.B) { + str := string(RandBytes(int(size))) + data := make([]byte, 0, len(str)+5) + data = AppendString(data, str) + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rd.ReadString() + if err != nil { + b.Fatal(err) + } + } +} + +func benchStringAsBytes(size uint32, b *testing.B) { + str := string(RandBytes(int(size))) + data := make([]byte, 0, len(str)+5) + data = AppendString(data, str) + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + var scratch []byte + var err error + for i := 0; i < b.N; i++ { + scratch, err = rd.ReadStringAsBytes(scratch) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkRead16StringAsBytes(b *testing.B) { + benchStringAsBytes(16, b) +} + +func BenchmarkRead256StringAsBytes(b *testing.B) { + benchStringAsBytes(256, b) +} + +func BenchmarkRead16String(b *testing.B) { + benchString(16, b) +} + +func BenchmarkRead256String(b *testing.B) { + benchString(256, b) +} + +func TestReadComplex64(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + rd := NewReader(&buf) + + for i := 0; i < 100; i++ { + buf.Reset() + f := complex(rand.Float32()*math.MaxFloat32, rand.Float32()*math.MaxFloat32) + + wr.WriteComplex64(f) + err := wr.Flush() + if err != nil { + t.Fatal(err) + } + + out, err := rd.ReadComplex64() + if err != nil { + t.Error(err) + continue + } + + if out != f { + t.Errorf("Wrote %f; read %f", f, out) + } + + } +} + +func BenchmarkReadComplex64(b *testing.B) { + f := complex(rand.Float32(), rand.Float32()) + data := AppendComplex64(nil, f) + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := 
rd.ReadComplex64() + if err != nil { + b.Fatal(err) + } + } +} + +func TestReadComplex128(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + rd := NewReader(&buf) + + for i := 0; i < 10; i++ { + buf.Reset() + f := complex(rand.Float64()*math.MaxFloat64, rand.Float64()*math.MaxFloat64) + + wr.WriteComplex128(f) + err := wr.Flush() + if err != nil { + t.Fatal(err) + } + + out, err := rd.ReadComplex128() + if err != nil { + t.Error(err) + continue + } + if out != f { + t.Errorf("Wrote %f; read %f", f, out) + } + + } +} + +func BenchmarkReadComplex128(b *testing.B) { + f := complex(rand.Float64(), rand.Float64()) + data := AppendComplex128(nil, f) + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rd.ReadComplex128() + if err != nil { + b.Fatal(err) + } + } +} + +func TestTime(t *testing.T) { + var buf bytes.Buffer + now := time.Now() + en := NewWriter(&buf) + dc := NewReader(&buf) + + err := en.WriteTime(now) + if err != nil { + t.Fatal(err) + } + err = en.Flush() + if err != nil { + t.Fatal(err) + } + + out, err := dc.ReadTime() + if err != nil { + t.Fatal(err) + } + + // check for equivalence + if !now.Equal(out) { + t.Fatalf("%s in; %s out", now, out) + } + + // check for time.Local zone + if now != out { + t.Error("returned time.Time not set to time.Local") + } +} + +func BenchmarkReadTime(b *testing.B) { + t := time.Now() + data := AppendTime(nil, t) + rd := NewReader(NewEndlessReader(data, b)) + b.SetBytes(int64(len(data))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := rd.ReadTime() + if err != nil { + b.Fatal(err) + } + } +} + +func TestSkip(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + rd := NewReader(&buf) + + wr.WriteMapHeader(4) + wr.WriteString("key_1") + wr.WriteBytes([]byte("value_1")) + wr.WriteString("key_2") + wr.WriteFloat64(2.0) + wr.WriteString("key_3") + wr.WriteComplex128(3.0i) + wr.WriteString("key_4") + wr.WriteInt64(49080432189) + wr.Flush() + + // this should skip the whole map + err := rd.Skip() + if err != nil { + t.Fatal(err) + } + + tp, err := rd.NextType() + if err != io.EOF { + t.Errorf("expected %q; got %q", io.EOF, err) + t.Errorf("returned type %q", tp) + } + +} + +func BenchmarkSkip(b *testing.B) { + var buf bytes.Buffer + en := NewWriter(&buf) + en.WriteMapHeader(6) + + en.WriteString("thing_one") + en.WriteString("value_one") + + en.WriteString("thing_two") + en.WriteFloat64(3.14159) + + en.WriteString("some_bytes") + en.WriteBytes([]byte("nkl4321rqw908vxzpojnlk2314rqew098-s09123rdscasd")) + + en.WriteString("the_time") + en.WriteTime(time.Now()) + + en.WriteString("what?") + en.WriteBool(true) + + en.WriteString("ext") + en.WriteExtension(&RawExtension{Type: 55, Data: []byte("raw data!!!")}) + en.Flush() + + bts := buf.Bytes() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + + rd := NewReader(NewEndlessReader(bts, b)) + for i := 0; i < b.N; i++ { + err := rd.Skip() + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go new file mode 100644 index 0000000000..fa0b7d535c --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes_test.go @@ -0,0 +1,319 @@ +package msgp + +import ( + "bytes" + "math" + "testing" + "time" +) + +func TestIssue116(t *testing.T) { + data := AppendInt64(nil, math.MinInt64) + i, _, err := ReadInt64Bytes(data) + if err != nil { 
+ t.Fatal(err) + } + if i != math.MinInt64 { + t.Errorf("put %d in and got %d out", int64(math.MinInt64), i) + } + + var buf bytes.Buffer + + w := NewWriter(&buf) + w.WriteInt64(math.MinInt64) + w.Flush() + i, err = NewReader(&buf).ReadInt64() + if err != nil { + t.Fatal(err) + } + if i != math.MinInt64 { + t.Errorf("put %d in and got %d out", int64(math.MinInt64), i) + } +} + +func TestAppendMapHeader(t *testing.T) { + szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32} + var buf bytes.Buffer + en := NewWriter(&buf) + + var bts []byte + for _, sz := range szs { + buf.Reset() + en.WriteMapHeader(sz) + en.Flush() + bts = AppendMapHeader(bts[0:0], sz) + + if !bytes.Equal(buf.Bytes(), bts) { + t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts) + } + } +} + +func BenchmarkAppendMapHeader(b *testing.B) { + buf := make([]byte, 0, 9) + N := b.N / 4 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < N; i++ { + AppendMapHeader(buf[:0], 0) + AppendMapHeader(buf[:0], uint32(tint8)) + AppendMapHeader(buf[:0], tuint16) + AppendMapHeader(buf[:0], tuint32) + } +} + +func TestAppendArrayHeader(t *testing.T) { + szs := []uint32{0, 1, uint32(tint8), uint32(tint16), tuint32} + var buf bytes.Buffer + en := NewWriter(&buf) + + var bts []byte + for _, sz := range szs { + buf.Reset() + en.WriteArrayHeader(sz) + en.Flush() + bts = AppendArrayHeader(bts[0:0], sz) + + if !bytes.Equal(buf.Bytes(), bts) { + t.Errorf("for size %d, encoder wrote %q and append wrote %q", sz, buf.Bytes(), bts) + } + } +} + +func BenchmarkAppendArrayHeader(b *testing.B) { + buf := make([]byte, 0, 9) + N := b.N / 4 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < N; i++ { + AppendArrayHeader(buf[:0], 0) + AppendArrayHeader(buf[:0], uint32(tint8)) + AppendArrayHeader(buf[:0], tuint16) + AppendArrayHeader(buf[:0], tuint32) + } +} + +func TestAppendNil(t *testing.T) { + var bts []byte + bts = AppendNil(bts[0:0]) + if bts[0] != mnil { + t.Fatal("bts[0] is not 'nil'") + } +} + +func TestAppendFloat64(t *testing.T) { + f := float64(3.14159) + var buf bytes.Buffer + en := NewWriter(&buf) + + var bts []byte + en.WriteFloat64(f) + en.Flush() + bts = AppendFloat64(bts[0:0], f) + if !bytes.Equal(buf.Bytes(), bts) { + t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts) + } +} + +func BenchmarkAppendFloat64(b *testing.B) { + f := float64(3.14159) + buf := make([]byte, 0, 9) + b.SetBytes(9) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + AppendFloat64(buf[0:0], f) + } +} + +func TestAppendFloat32(t *testing.T) { + f := float32(3.14159) + var buf bytes.Buffer + en := NewWriter(&buf) + + var bts []byte + en.WriteFloat32(f) + en.Flush() + bts = AppendFloat32(bts[0:0], f) + if !bytes.Equal(buf.Bytes(), bts) { + t.Errorf("for float %f, encoder wrote %q; append wrote %q", f, buf.Bytes(), bts) + } +} + +func BenchmarkAppendFloat32(b *testing.B) { + f := float32(3.14159) + buf := make([]byte, 0, 5) + b.SetBytes(5) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + AppendFloat32(buf[0:0], f) + } +} + +func TestAppendInt64(t *testing.T) { + is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)} + var buf bytes.Buffer + en := NewWriter(&buf) + + var bts []byte + for _, i := range is { + buf.Reset() + en.WriteInt64(i) + en.Flush() + bts = AppendInt64(bts[0:0], i) + if !bytes.Equal(buf.Bytes(), bts) { + t.Errorf("for int64 %d, encoder wrote %q; append wrote %q", i, buf.Bytes(), bts) + } + } +} + +func BenchmarkAppendInt64(b 
*testing.B) {
+	is := []int64{0, 1, -5, -50, int64(tint16), int64(tint32), int64(tint64)}
+	l := len(is)
+	buf := make([]byte, 0, 9)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		AppendInt64(buf[0:0], is[i%l])
+	}
+}
+
+func TestAppendUint64(t *testing.T) {
+	us := []uint64{0, 1, uint64(tuint16), uint64(tuint32), tuint64}
+	var buf bytes.Buffer
+	en := NewWriter(&buf)
+	var bts []byte
+
+	for _, u := range us {
+		buf.Reset()
+		en.WriteUint64(u)
+		en.Flush()
+		bts = AppendUint64(bts[0:0], u)
+		if !bytes.Equal(buf.Bytes(), bts) {
+			t.Errorf("for uint64 %d, encoder wrote %q; append wrote %q", u, buf.Bytes(), bts)
+		}
+	}
+}
+
+func BenchmarkAppendUint64(b *testing.B) {
+	us := []uint64{0, 1, 15, uint64(tuint16), uint64(tuint32), tuint64}
+	buf := make([]byte, 0, 9)
+	b.ReportAllocs()
+	b.ResetTimer()
+	l := len(us)
+	for i := 0; i < b.N; i++ {
+		AppendUint64(buf[0:0], us[i%l])
+	}
+}
+
+func TestAppendBytes(t *testing.T) {
+	sizes := []int{0, 1, 225, int(tuint32)}
+	var buf bytes.Buffer
+	en := NewWriter(&buf)
+	var bts []byte
+
+	for _, sz := range sizes {
+		buf.Reset()
+		b := RandBytes(sz)
+		en.WriteBytes(b)
+		en.Flush()
+		bts = AppendBytes(bts[0:0], b)
+		if !bytes.Equal(buf.Bytes(), bts) {
+			t.Errorf("for bytes of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
+		}
+	}
+}
+
+func benchappendBytes(size uint32, b *testing.B) {
+	bts := RandBytes(int(size))
+	buf := make([]byte, 0, len(bts)+5)
+	b.SetBytes(int64(len(bts) + 5))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		AppendBytes(buf[0:0], bts)
+	}
+}
+
+func BenchmarkAppend16Bytes(b *testing.B) { benchappendBytes(16, b) }
+
+func BenchmarkAppend256Bytes(b *testing.B) { benchappendBytes(256, b) }
+
+func BenchmarkAppend2048Bytes(b *testing.B) { benchappendBytes(2048, b) }
+
+func TestAppendString(t *testing.T) {
+	sizes := []int{0, 1, 225, int(tuint32)}
+	var buf bytes.Buffer
+	en := NewWriter(&buf)
+	var bts []byte
+
+	for _, sz := range sizes {
+		buf.Reset()
+		s := string(RandBytes(sz))
+		en.WriteString(s)
+		en.Flush()
+		bts = AppendString(bts[0:0], s)
+		if !bytes.Equal(buf.Bytes(), bts) {
+			t.Errorf("for string of length %d, encoder wrote %d bytes and append wrote %d bytes", sz, buf.Len(), len(bts))
+			t.Errorf("WriteString prefix: %x", buf.Bytes()[0:5])
+			t.Errorf("AppendString prefix: %x", bts[0:5])
+		}
+	}
+}
+
+func benchappendString(size uint32, b *testing.B) {
+	str := string(RandBytes(int(size)))
+	buf := make([]byte, 0, len(str)+5)
+	b.SetBytes(int64(len(str) + 5))
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		AppendString(buf[0:0], str)
+	}
+}
+
+func BenchmarkAppend16String(b *testing.B) { benchappendString(16, b) }
+
+func BenchmarkAppend256String(b *testing.B) { benchappendString(256, b) }
+
+func BenchmarkAppend2048String(b *testing.B) { benchappendString(2048, b) }
+
+func TestAppendBool(t *testing.T) {
+	vs := []bool{true, false}
+	var buf bytes.Buffer
+	en := NewWriter(&buf)
+	var bts []byte
+
+	for _, v := range vs {
+		buf.Reset()
+		en.WriteBool(v)
+		en.Flush()
+		bts = AppendBool(bts[0:0], v)
+		if !bytes.Equal(buf.Bytes(), bts) {
+			t.Errorf("for %t, encoder wrote %q and append wrote %q", v, buf.Bytes(), bts)
+		}
+	}
+}
+
+func BenchmarkAppendBool(b *testing.B) {
+	vs := []bool{true, false}
+	buf := make([]byte, 0, 1)
+	b.SetBytes(1)
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		AppendBool(buf[0:0], vs[i%2])
+	}
+}
+
+func BenchmarkAppendTime(b *testing.B) {
+	t := time.Now()
+	b.SetBytes(15)
+	buf := 
make([]byte, 0, 15) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + AppendTime(buf[0:0], t) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_test.go b/vendor/github.com/tinylib/msgp/msgp/write_test.go new file mode 100644 index 0000000000..c5e97fe2b9 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_test.go @@ -0,0 +1,405 @@ +package msgp + +import ( + "bytes" + "math" + "math/rand" + "testing" + "time" +) + +var ( + tint8 int8 = 126 // cannot be most fix* types + tint16 int16 = 150 // cannot be int8 + tint32 int32 = math.MaxInt16 + 100 // cannot be int16 + tint64 int64 = math.MaxInt32 + 100 // cannot be int32 + tuint16 uint32 = 300 // cannot be uint8 + tuint32 uint32 = math.MaxUint16 + 100 // cannot be uint16 + tuint64 uint64 = math.MaxUint32 + 100 // cannot be uint32 +) + +func RandBytes(sz int) []byte { + out := make([]byte, sz) + for i := range out { + out[i] = byte(rand.Int63n(math.MaxInt64) % 256) + } + return out +} + +func TestWriteMapHeader(t *testing.T) { + tests := []struct { + Sz uint32 + Outbytes []byte + }{ + {0, []byte{mfixmap}}, + {1, []byte{mfixmap | byte(1)}}, + {100, []byte{mmap16, byte(uint16(100) >> 8), byte(uint16(100))}}, + {tuint32, + []byte{mmap32, + byte(tuint32 >> 24), + byte(tuint32 >> 16), + byte(tuint32 >> 8), + byte(tuint32), + }, + }, + } + + var buf bytes.Buffer + var err error + wr := NewWriter(&buf) + for _, test := range tests { + buf.Reset() + err = wr.WriteMapHeader(test.Sz) + if err != nil { + t.Error(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(buf.Bytes(), test.Outbytes) { + t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes()) + } + } +} + +func BenchmarkWriteMapHeader(b *testing.B) { + wr := NewWriter(Nowhere) + N := b.N / 4 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < N; i++ { + wr.WriteMapHeader(0) + wr.WriteMapHeader(8) + wr.WriteMapHeader(tuint16) + wr.WriteMapHeader(tuint32) + } +} + +func TestWriteArrayHeader(t *testing.T) { + tests := []struct { + Sz uint32 + Outbytes []byte + }{ + {0, []byte{mfixarray}}, + {1, []byte{mfixarray | byte(1)}}, + {tuint16, []byte{marray16, byte(tuint16 >> 8), byte(tuint16)}}, + {tuint32, []byte{marray32, byte(tuint32 >> 24), byte(tuint32 >> 16), byte(tuint32 >> 8), byte(tuint32)}}, + } + + var buf bytes.Buffer + var err error + wr := NewWriter(&buf) + for _, test := range tests { + buf.Reset() + err = wr.WriteArrayHeader(test.Sz) + if err != nil { + t.Error(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(buf.Bytes(), test.Outbytes) { + t.Errorf("Expected bytes %x; got %x", test.Outbytes, buf.Bytes()) + } + } +} + +func TestReadWriteStringHeader(t *testing.T) { + sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32} + var buf bytes.Buffer + var err error + wr := NewWriter(&buf) + for _, sz := range sizes { + buf.Reset() + err = wr.WriteStringHeader(sz) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + var nsz uint32 + nsz, err = NewReader(&buf).ReadStringHeader() + if err != nil { + t.Fatal(err) + } + if nsz != sz { + t.Errorf("put in size %d but got out size %d", sz, nsz) + } + } +} + +func TestReadWriteBytesHeader(t *testing.T) { + sizes := []uint32{0, 5, 8, 19, 150, tuint16, tuint32} + var buf bytes.Buffer + var err error + wr := NewWriter(&buf) + for _, sz := range sizes { + buf.Reset() + err = wr.WriteBytesHeader(sz) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + var nsz uint32 + 
nsz, err = NewReader(&buf).ReadBytesHeader() + if err != nil { + t.Fatal(err) + } + if nsz != sz { + t.Errorf("put in size %d but got out size %d", sz, nsz) + } + } +} + +func BenchmarkWriteArrayHeader(b *testing.B) { + wr := NewWriter(Nowhere) + N := b.N / 4 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < N; i++ { + wr.WriteArrayHeader(0) + wr.WriteArrayHeader(16) + wr.WriteArrayHeader(tuint16) + wr.WriteArrayHeader(tuint32) + } +} + +func TestWriteNil(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + + err := wr.WriteNil() + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + bts := buf.Bytes() + if bts[0] != mnil { + t.Errorf("Expected %x; wrote %x", mnil, bts[0]) + } +} + +func TestWriteFloat64(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + + for i := 0; i < 10000; i++ { + buf.Reset() + flt := (rand.Float64() - 0.5) * math.MaxFloat64 + err := wr.WriteFloat64(flt) + if err != nil { + t.Errorf("Error with %f: %s", flt, err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + bts := buf.Bytes() + + if bts[0] != mfloat64 { + t.Errorf("Leading byte was %x and not %x", bts[0], mfloat64) + } + } +} + +func BenchmarkWriteFloat64(b *testing.B) { + f := rand.Float64() + wr := NewWriter(Nowhere) + b.SetBytes(9) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr.WriteFloat64(f) + } +} + +func TestWriteFloat32(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + + for i := 0; i < 10000; i++ { + buf.Reset() + flt := (rand.Float32() - 0.5) * math.MaxFloat32 + err := wr.WriteFloat32(flt) + if err != nil { + t.Errorf("Error with %f: %s", flt, err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + bts := buf.Bytes() + + if bts[0] != mfloat32 { + t.Errorf("Leading byte was %x and not %x", bts[0], mfloat32) + } + } +} + +func BenchmarkWriteFloat32(b *testing.B) { + f := rand.Float32() + wr := NewWriter(Nowhere) + b.SetBytes(5) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr.WriteFloat32(f) + } +} + +func TestWriteInt64(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + + for i := 0; i < 10000; i++ { + buf.Reset() + + num := (rand.Int63n(math.MaxInt64)) - (math.MaxInt64 / 2) + + err := wr.WriteInt64(num) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + + if buf.Len() > 9 { + t.Errorf("buffer length should be <= 9; it's %d", buf.Len()) + } + } +} + +func BenchmarkWriteInt64(b *testing.B) { + wr := NewWriter(Nowhere) + b.SetBytes(9) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr.WriteInt64(int64(tint64)) + } +} + +func TestWriteUint64(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + + for i := 0; i < 10000; i++ { + buf.Reset() + + num := uint64(rand.Int63n(math.MaxInt64)) + + err := wr.WriteUint64(num) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + if buf.Len() > 9 { + t.Errorf("buffer length should be <= 9; it's %d", buf.Len()) + } + } +} + +func BenchmarkWriteUint64(b *testing.B) { + wr := NewWriter(Nowhere) + b.SetBytes(9) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr.WriteUint64(uint64(tuint64)) + } +} + +func TestWriteBytes(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + sizes := []int{0, 1, 225, int(tuint32)} + + for _, size := range sizes { + buf.Reset() + bts := RandBytes(size) + + err := wr.WriteBytes(bts) + if err != nil { + t.Fatal(err) + } + + err = 
wr.Flush() + if err != nil { + t.Fatal(err) + } + + if buf.Len() < len(bts) { + t.Errorf("somehow, %d bytes were encoded in %d bytes", len(bts), buf.Len()) + } + } +} + +func benchwrBytes(size uint32, b *testing.B) { + bts := RandBytes(int(size)) + wr := NewWriter(Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr.WriteBytes(bts) + } +} + +func BenchmarkWrite16Bytes(b *testing.B) { benchwrBytes(16, b) } + +func BenchmarkWrite256Bytes(b *testing.B) { benchwrBytes(256, b) } + +func BenchmarkWrite2048Bytes(b *testing.B) { benchwrBytes(2048, b) } + +func TestWriteTime(t *testing.T) { + var buf bytes.Buffer + wr := NewWriter(&buf) + tm := time.Now() + err := wr.WriteTime(tm) + if err != nil { + t.Fatal(err) + } + err = wr.Flush() + if err != nil { + t.Fatal(err) + } + if buf.Len() != 15 { + t.Errorf("expected time.Time to be %d bytes; got %d", 15, buf.Len()) + } + + newt, err := NewReader(&buf).ReadTime() + if err != nil { + t.Fatal(err) + } + if !newt.Equal(tm) { + t.Errorf("in/out not equal; %s in and %s out", tm, newt) + } +} + +func BenchmarkWriteTime(b *testing.B) { + t := time.Now() + wr := NewWriter(Nowhere) + b.SetBytes(15) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + wr.WriteTime(t) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore new file mode 100644 index 0000000000..2734907909 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/.gitignore @@ -0,0 +1,15 @@ +*.out +*.test +*.xml +*.swp +.idea/ +.tmp/ +*.iml +*.cov +*.html +*.log +gen/thrift/js +gen/thrift/py +vendor/ +crossdock-main +crossdock/jaeger-docker-compose.yml diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules new file mode 100644 index 0000000000..295ebcf622 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/.gitmodules @@ -0,0 +1,3 @@ +[submodule "idl"] + path = idl + url = https://github.com/uber/jaeger-idl.git diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml new file mode 100644 index 0000000000..44658eaa6e --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml @@ -0,0 +1,44 @@ +sudo: required + +language: go +go_import_path: github.com/uber/jaeger-client-go + +dist: trusty + +matrix: + include: + - go: 1.7 + env: + - TESTS=true + - COVERAGE=true + - go: 1.7 + env: + - CROSSDOCK=true + +services: + - docker + +env: + global: + - DOCKER_COMPOSE_VERSION=1.8.0 + - GO15VENDOREXPERIMENT=1 + - COMMIT=${TRAVIS_COMMIT::8} + # DOCKER_PASS + - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk=" + # DOCKER_USER + - secure: 
"bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0=" + +install: + - make install-ci + - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi + +script: + - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi + - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi + +after_success: + - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi + - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi + +after_failure: + - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md index 17014701f4..df6589d767 100644 --- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md +++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md @@ -1,10 +1,20 @@ Changes by Version ================== -2.9.1 (unreleased) ------------------- +2.11.0 (2017-11-27) +------------------- + +- Normalize metric names and tags to be compatible with Prometheus (#222) + + +2.10.0 (2017-11-14) +------------------- -- nothing yet +- Support custom tracing headers (#176) +- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182) +- Do not coerce baggage keys to lower case (#196) +- Log span name when span cannot be reported (#198) +- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219) 2.9.0 (2017-07-29) diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md index ba4d24f473..7bf077d6fd 100644 --- a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md +++ b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md @@ -1,28 +1,72 @@ -# Contributing to `jaeger-client-go` +# How to Contribute to Jaeger -We'd love your help! If you would like to contribute code you can do so through GitHub -by forking the repository and sending a pull request into the `master` branch. +We'd love your help! + +Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub +pull requests. This document outlines some of the conventions on development +workflow, commit message formatting, contact points and other resources to make +it easier to get your contribution accepted. + +We gratefully welcome improvements to documentation as well as to code. + +# Certificate of Origin + +By contributing to this project you agree to the [Developer Certificate of +Origin](https://developercertificate.org/) (DCO). This document was created +by the Linux Kernel community and is a simple statement that you, as a +contributor, have the legal right to make the contribution. See the [DCO](DCO) +file for details. 
## Getting Started This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies. -To get started: +To get started, make sure you clone the Git repository into the correct location +`github.com/uber/jaeger-client-go` relative to `$GOPATH`: + +``` +mkdir -p $GOPATH/src/github.com/uber +cd $GOPATH/src/github.com/uber +git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go +cd jaeger-client-go +``` + +Then install dependencies and run the tests: -```bash +``` git submodule update --init --recursive glide install make test ``` +## Imports grouping + +This project follows the following pattern for grouping imports in Go files: + * imports from standard library + * imports from other projects + * imports from `jaeger-client-go` project + +For example: + +```go +import ( + "fmt" + + "github.com/uber/jaeger-lib/metrics" + "go.uber.org/zap" + + "github.com/uber/jaeger-client-go/config" +) +``` + ## Making A Change *Before making any significant changes, please [open an -issue](https://github.com/uber/jaeger-client-go/issues).* Discussing your proposed +issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed changes ahead of time will make the contribution process smooth for everyone. Once we've discussed your changes and you've got your code ready, make sure -that tests are passing (`make test` or `make cover`) and open your PR! Your +that tests are passing (`make test` or `make cover`) and open your PR. Your pull request is most likely to be accepted if it: * Includes tests for new functionality. @@ -31,37 +75,90 @@ pull request is most likely to be accepted if it: review comments](https://github.com/golang/go/wiki/CodeReviewComments). * Has a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). - -## Cutting a Release - -See [RELEASE.md](./RELEASE.md) +* Each commit must be signed by the author ([see below](#sign-your-work)). ## License By contributing your code, you agree to license your contribution under the terms -of the MIT License: https://github.com/uber/jaeger-client-go/blob/master/LICENSE +of the [Apache License](LICENSE). -If you are adding a new file it should have a header like below. +If you are adding a new file it should have a header like below. The easiest +way to add such header is to run `make fmt`. ``` -// Copyright (c) 2017 Uber Technologies, Inc. +// Copyright (c) 2017 The Jaeger Authors. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. ``` +## Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. + +If you want this to be automatic you can set up some aliases: + +``` +git config --add alias.amend "commit -s --amend" +git config --add alias.c "commit -s" +``` diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO new file mode 100644 index 0000000000..068953d4bd --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE index 1fe8195977..261eeb9e9f 100644 --- a/vendor/github.com/uber/jaeger-client-go/LICENSE +++ b/vendor/github.com/uber/jaeger-client-go/LICENSE @@ -1,21 +1,201 @@ -The MIT License (MIT) - -Copyright (c) 2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile index c8b0925b60..d2a0fd8d4a 100644 --- a/vendor/github.com/uber/jaeger-client-go/Makefile +++ b/vendor/github.com/uber/jaeger-client-go/Makefile @@ -49,7 +49,8 @@ lint: @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) @[ ! 
-s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) - @[ ! -s "$(FMT_LOG)" ] || (echo "Go Fmt Failures, run 'make fmt'" | cat - $(FMT_LOG) && false) + ./scripts/updateLicenses.sh >> $(FMT_LOG) + @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) .PHONY: install @@ -89,16 +90,16 @@ idl-submodule: thrift-image: $(THRIFT) -version -.PHONY: install_ci -install_ci: install +.PHONY: install-ci +install-ci: install go get github.com/wadey/gocovmerge go get github.com/mattn/goveralls go get golang.org/x/tools/cmd/cover go get github.com/golang/lint/golint -.PHONY: test_ci -test_ci: +.PHONY: test-ci +test-ci: @./scripts/cover.sh $(shell go list $(PACKAGES)) make lint diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md index d08f092727..2eb1e83c18 100644 --- a/vendor/github.com/uber/jaeger-client-go/README.md +++ b/vendor/github.com/uber/jaeger-client-go/README.md @@ -6,6 +6,12 @@ This is a client side library that implements an [OpenTracing](http://opentracing.io) Tracer, with Zipkin-compatible data model. +**IMPORTANT**: The library's import path is `github.com/uber/jaeger-client-go`, based on its original location. Do not try to import it as `github.com/jaegertracing/jaeger-client-go`, it will not compile. We might revisit this in the next major release. + +## How to Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md). + ## Installation We recommended using a dependency manager like [glide](https://github.com/Masterminds/glide) @@ -57,19 +63,28 @@ The tracer emits a number of different metrics, defined in tag-based metric names, e.g. instead of `statsd`-style string names like `counters.my-service.jaeger.spans.started.sampled`, the metrics are defined by a short name and a collection of key/value tags, for -example: `name:traces, state:started, sampled:true`. +example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go) +file for the full list and descriptions of emitted metrics. -The monitoring backend is represented by the -[StatsReporter](stats_reporter.go) interface. An implementation -of that interface should be passed to the `New` method during -tracer initialization: +The monitoring backend is represented by the `metrics.Factory` interface from package +[`"github.com/uber/jaeger-lib/metrics"`](github.com/uber/jaeger-lib/metrics). An implementation +of that interface can be passed as an option to either the Configuration object or the Tracer +constructor, for example: ```go - stats := // create StatsReporter implementation - tracer := config.Tracing.New("your-service-name", stats) +import ( + "github.com/uber/jaeger-client-go/config" + "github.com/uber/jaeger-lib/metrics/prometheus" +) + + metricsFactory := prometheus.New() + tracer, closer, err := new(config.Configuration).New( + "your-service-name", + config.Metrics(metricsFactory), + ) ``` -By default, a no-op `NullStatsReporter` is used. +By default, a no-op `metrics.NullFactory` is used. ### Logging @@ -80,12 +95,15 @@ by the [Logger](logger.go) interface. A logger instance implementing this interface can be set on the `Config` object before calling the `New` method. +Besides the [zap](https://github.com/uber-go/zap) implementation +bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) +one in the [jaeger-lib](https://github.com/uber/jaeger-lib) repository. 
+ ## Instrumentation for Tracing Since this tracer is fully compliant with OpenTracing API 1.0, all code instrumentation should only use the API itself, as described -in the [opentracing-go] -(https://github.com/opentracing/opentracing-go) documentation. +in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation. ## Features @@ -134,18 +152,15 @@ are available: ### Baggage Injection -The OpenTracing spec allows for [baggage](https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item), -which are key value pairs that are added to the span context and propagated -throughout the trace. -An external process can inject baggage by setting the special -HTTP Header `jaeger-baggage` on a request +The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added +to the span context and propagated throughout the trace. An external process can inject baggage +by setting the special HTTP Header `jaeger-baggage` on a request: ```sh curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com ``` -Baggage can also be programatically set inside your service by doing -the following +Baggage can also be programmatically set inside your service: ```go if span := opentracing.SpanFromContext(ctx); span != nil { @@ -209,14 +224,15 @@ However it is not the default propagation format, see [here](zipkin/README.md#Ne ## License - [The MIT License](LICENSE). +[Apache 2.0 License](LICENSE). [doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg [doc]: https://godoc.org/github.com/uber/jaeger-client-go -[ci-img]: https://travis-ci.org/uber/jaeger-client-go.svg?branch=master -[ci]: https://travis-ci.org/uber/jaeger-client-go -[cov-img]: https://coveralls.io/repos/uber/jaeger-client-go/badge.svg?branch=master&service=github -[cov]: https://coveralls.io/github/uber/jaeger-client-go?branch=master +[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master +[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go +[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg +[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go [ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg [ot-url]: http://opentracing.io +[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go index dd72ea5d5f..1037ca0e86 100644 --- a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go +++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software.
+// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger @@ -44,7 +38,7 @@ func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Me func (s *baggageSetter) setBaggage(span *Span, key, value string) { var truncated bool var prevItem string - restriction := s.restrictionManager.GetRestriction(key) + restriction := s.restrictionManager.GetRestriction(span.serviceName(), key) if !restriction.KeyAllowed() { s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) s.metrics.BaggageUpdateFailure.Inc(1) diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter_test.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter_test.go new file mode 100644 index 0000000000..c0454edfde --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter_test.go @@ -0,0 +1,126 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jaeger + +import ( + "testing" + + "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/uber/jaeger-client-go/internal/baggage" +) + +func withTracerAndMetrics(f func(tracer *Tracer, metrics *Metrics, factory *metrics.LocalFactory)) { + factory := metrics.NewLocalFactory(0) + m := NewMetrics(factory, nil) + + service := "DOOP" + tracer, closer := NewTracer(service, NewConstSampler(true), NewNullReporter()) + defer closer.Close() + f(tracer.(*Tracer), m, factory) +} + +func TestTruncateBaggage(t *testing.T) { + withTracerAndMetrics(func(tracer *Tracer, metrics *Metrics, factory *metrics.LocalFactory) { + setter := newBaggageSetter(baggage.NewDefaultRestrictionManager(5), metrics) + key := "key" + value := "01234567890" + expected := "01234" + + parent := tracer.StartSpan("parent").(*Span) + parent.context = parent.context.WithBaggageItem(key, value) + span := tracer.StartSpan("child", opentracing.ChildOf(parent.Context())).(*Span) + + setter.setBaggage(span, key, value) + assertBaggageFields(t, span, key, expected, true, true, false) + assert.Equal(t, expected, span.context.baggage[key]) + + testutils.AssertCounterMetrics(t, factory, + testutils.ExpectedMetric{ + Name: "jaeger.baggage_truncations", + Value: 1, + }, + testutils.ExpectedMetric{ + Name: "jaeger.baggage_updates", + Tags: map[string]string{"result": "ok"}, + Value: 1, + }, + ) + }) +} + +type keyNotAllowedBaggageRestrictionManager struct{} + +func (m *keyNotAllowedBaggageRestrictionManager) GetRestriction(service, key string) *baggage.Restriction { + return baggage.NewRestriction(false, 0) +} + +func TestInvalidBaggage(t *testing.T) { + withTracerAndMetrics(func(tracer *Tracer, metrics *Metrics, factory *metrics.LocalFactory) { + setter := newBaggageSetter(&keyNotAllowedBaggageRestrictionManager{}, metrics) + key := "key" + value := "value" + + span := tracer.StartSpan("span").(*Span) + + setter.setBaggage(span, key, value) + assertBaggageFields(t, span, key, value, false, false, true) + assert.Empty(t, span.context.baggage[key]) + + testutils.AssertCounterMetrics(t, factory, + testutils.ExpectedMetric{ + Name: "jaeger.baggage_updates", + Tags: map[string]string{"result": "err"}, + Value: 1, + }, + ) + }) +} + +func TestNotSampled(t *testing.T) { + withTracerAndMetrics(func(_ *Tracer, metrics *Metrics, factory *metrics.LocalFactory) { + tracer, closer := NewTracer("svc", NewConstSampler(false), NewNullReporter()) + defer closer.Close() + + setter := newBaggageSetter(baggage.NewDefaultRestrictionManager(10), metrics) + span := tracer.StartSpan("span").(*Span) + setter.setBaggage(span, "key", "value") + assert.Empty(t, span.logs, "No baggage fields should be created if span is not sampled") + }) +} + +func assertBaggageFields(t *testing.T, sp *Span, key, value string, override, truncated, invalid bool) { + require.Len(t, sp.logs, 1) + keys := map[string]struct{}{} + for _, field := range sp.logs[0].Fields { + keys[field.String()] = struct{}{} + } + assert.Contains(t, keys, "event:baggage") + assert.Contains(t, keys, "key:"+key) + assert.Contains(t, keys, "value:"+value) + if invalid { + assert.Contains(t, keys, "invalid:true") + } + if override { + assert.Contains(t, keys, "override:true") + } + if truncated { + assert.Contains(t, keys, "truncated:true") + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go 
b/vendor/github.com/uber/jaeger-client-go/config/config.go index 26ee44b99d..306b92fa2a 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/config.go +++ b/vendor/github.com/uber/jaeger-client-go/config/config.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package config @@ -165,6 +159,7 @@ func (c Configuration) New( jaeger.TracerOptions.Metrics(tracerMetrics), jaeger.TracerOptions.Logger(opts.logger), jaeger.TracerOptions.CustomHeaderKeys(c.Headers), + jaeger.TracerOptions.Gen128Bit(opts.gen128Bit), jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan), } @@ -216,7 +211,7 @@ func (c Configuration) InitGlobalTracer( if err != nil { return nil, err } - opentracing.InitGlobalTracer(tracer) + opentracing.SetGlobalTracer(tracer) return closer, nil } diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_test.go b/vendor/github.com/uber/jaeger-client-go/config/config_test.go new file mode 100644 index 0000000000..cff5bf3283 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/config/config_test.go @@ -0,0 +1,259 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/uber/jaeger-client-go" + "github.com/uber/jaeger-client-go/log" +) + +func TestNewSamplerConst(t *testing.T) { + constTests := []struct { + param float64 + decision bool + }{{1, true}, {0, false}} + for _, tst := range constTests { + cfg := &SamplerConfig{Type: jaeger.SamplerTypeConst, Param: tst.param} + s, err := cfg.NewSampler("x", nil) + require.NoError(t, err) + s1, ok := s.(*jaeger.ConstSampler) + require.True(t, ok, "converted to constSampler") + require.Equal(t, tst.decision, s1.Decision, "decision") + } +} + +func TestNewSamplerProbabilistic(t *testing.T) { + constTests := []struct { + param float64 + error bool + }{{1.5, true}, {0.5, false}} + for _, tst := range constTests { + cfg := &SamplerConfig{Type: jaeger.SamplerTypeProbabilistic, Param: tst.param} + s, err := cfg.NewSampler("x", nil) + if tst.error { + require.Error(t, err) + } else { + require.NoError(t, err) + _, ok := s.(*jaeger.ProbabilisticSampler) + require.True(t, ok, "converted to ProbabilisticSampler") + } + } +} + +func TestDefaultSampler(t *testing.T) { + cfg := Configuration{ + Sampler: &SamplerConfig{Type: "InvalidType"}, + } + _, _, err := cfg.New("testService") + require.Error(t, err) +} + +func TestInvalidSamplerType(t *testing.T) { + cfg := &SamplerConfig{MaxOperations: 10} + s, err := cfg.NewSampler("x", jaeger.NewNullMetrics()) + require.NoError(t, err) + rcs, ok := s.(*jaeger.RemotelyControlledSampler) + require.True(t, ok, "converted to RemotelyControlledSampler") + rcs.Close() +} + +func TestDefaultConfig(t *testing.T) { + cfg := Configuration{} + _, _, err := cfg.New("", Metrics(metrics.NullFactory), Logger(log.NullLogger)) + require.EqualError(t, err, "no service name provided") + + _, closer, err := cfg.New("testService") + defer closer.Close() + require.NoError(t, err) +} + +func TestDisabledFlag(t *testing.T) { + cfg := Configuration{Disabled: true} + _, closer, err := cfg.New("testService") + defer closer.Close() + require.NoError(t, err) +} + +func TestNewReporterError(t *testing.T) { + cfg := Configuration{ + Reporter: &ReporterConfig{LocalAgentHostPort: "bad_local_agent"}, + } + _, _, err := cfg.New("testService") + require.Error(t, err) +} + +func TestInitGlobalTracer(t *testing.T) { + // Save the existing GlobalTracer and replace after finishing function + prevTracer := opentracing.GlobalTracer() + defer opentracing.SetGlobalTracer(prevTracer) + noopTracer := opentracing.NoopTracer{} + + tests := []struct { + cfg Configuration + shouldErr bool + tracerChanged bool + }{ + { + cfg: Configuration{Disabled: true}, + shouldErr: false, + tracerChanged: false, + }, + { + cfg: Configuration{Sampler: &SamplerConfig{Type: "InvalidType"}}, + shouldErr: true, + tracerChanged: false, + }, + { + cfg: Configuration{ + Sampler: &SamplerConfig{ + Type: "remote", + SamplingRefreshInterval: 1, + }, + }, + shouldErr: false, + tracerChanged: true, + }, + { + cfg: Configuration{}, + shouldErr: false, + tracerChanged: true, + }, + } + for _, test := range tests { + opentracing.SetGlobalTracer(noopTracer) + _, err := test.cfg.InitGlobalTracer("testService") + if test.shouldErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + if test.tracerChanged { + require.NotEqual(t, 
noopTracer, opentracing.GlobalTracer()) + } else { + require.Equal(t, noopTracer, opentracing.GlobalTracer()) + } + } +} + +func TestConfigWithReporter(t *testing.T) { + c := Configuration{ + Sampler: &SamplerConfig{ + Type: "const", + Param: 1, + }, + } + r := jaeger.NewInMemoryReporter() + tracer, closer, err := c.New("test", Reporter(r)) + require.NoError(t, err) + defer closer.Close() + + tracer.StartSpan("test").Finish() + assert.Len(t, r.GetSpans(), 1) +} + +func TestConfigWithRPCMetrics(t *testing.T) { + metrics := metrics.NewLocalFactory(0) + c := Configuration{ + Sampler: &SamplerConfig{ + Type: "const", + Param: 1, + }, + RPCMetrics: true, + } + r := jaeger.NewInMemoryReporter() + tracer, closer, err := c.New( + "test", + Reporter(r), + Metrics(metrics), + ContribObserver(fakeContribObserver{}), + ) + require.NoError(t, err) + defer closer.Close() + + tracer.StartSpan("test", ext.SpanKindRPCServer).Finish() + + testutils.AssertCounterMetrics(t, metrics, + testutils.ExpectedMetric{ + Name: "jaeger-rpc.requests", + Tags: map[string]string{"component": "jaeger", "endpoint": "test", "error": "false"}, + Value: 1, + }, + ) +} + +func TestBaggageRestrictionsConfig(t *testing.T) { + m := metrics.NewLocalFactory(0) + c := Configuration{ + BaggageRestrictions: &BaggageRestrictionsConfig{ + HostPort: "not:1929213", + RefreshInterval: time.Minute, + }, + } + _, closer, err := c.New( + "test", + Metrics(m), + ) + require.NoError(t, err) + defer closer.Close() + + metricName := "jaeger.baggage_restrictions_updates" + metricTags := map[string]string{"result": "err"} + key := metrics.GetKey(metricName, metricTags, "|", "=") + for i := 0; i < 100; i++ { + // wait until the async initialization call is complete + counters, _ := m.Snapshot() + if _, ok := counters[key]; ok { + break + } + time.Sleep(time.Millisecond) + } + + testutils.AssertCounterMetrics(t, m, + testutils.ExpectedMetric{ + Name: metricName, + Tags: metricTags, + Value: 1, + }, + ) +} + +func TestConfigWithGen128Bit(t *testing.T) { + c := Configuration{ + Sampler: &SamplerConfig{ + Type: "const", + Param: 1, + }, + RPCMetrics: true, + } + tracer, closer, err := c.New("test", Gen128Bit(true)) + require.NoError(t, err) + defer closer.Close() + + span := tracer.StartSpan("test") + defer span.Finish() + traceID := span.Context().(jaeger.SpanContext).TraceID() + require.True(t, traceID.High != 0) + require.True(t, traceID.Low != 0) +} diff --git a/vendor/github.com/uber/jaeger-client-go/config/example_test.go b/vendor/github.com/uber/jaeger-client-go/config/example_test.go new file mode 100644 index 0000000000..befaae8d50 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/config/example_test.go @@ -0,0 +1,84 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config_test + +import ( + "log" + + "github.com/uber/jaeger-lib/metrics" + + "github.com/uber/jaeger-client-go" + jaegercfg "github.com/uber/jaeger-client-go/config" + jaegerlog "github.com/uber/jaeger-client-go/log" +) + +func ExampleConfiguration_InitGlobalTracer_testing() { + // Sample configuration for testing. Use constant sampling to sample every trace + // and enable LogSpan to log every span via configured Logger. + cfg := jaegercfg.Configuration{ + Sampler: &jaegercfg.SamplerConfig{ + Type: jaeger.SamplerTypeConst, + Param: 1, + }, + Reporter: &jaegercfg.ReporterConfig{ + LogSpans: true, + }, + } + + // Example logger and metrics factory. Use github.com/uber/jaeger-client-go/log + // and github.com/uber/jaeger-lib/metrics respectively to bind to real logging and metrics + // frameworks. + jLogger := jaegerlog.StdLogger + jMetricsFactory := metrics.NullFactory + + // Initialize tracer with a logger and a metrics factory + closer, err := cfg.InitGlobalTracer( + "serviceName", + jaegercfg.Logger(jLogger), + jaegercfg.Metrics(jMetricsFactory), + ) + if err != nil { + log.Printf("Could not initialize jaeger tracer: %s", err.Error()) + return + } + defer closer.Close() + + // continue main() +} + +func ExampleConfiguration_InitGlobalTracer_production() { + // Recommended configuration for production. + cfg := jaegercfg.Configuration{} + + // Example logger and metrics factory. Use github.com/uber/jaeger-client-go/log + // and github.com/uber/jaeger-lib/metrics respectively to bind to real logging and metrics + // frameworks. + jLogger := jaegerlog.StdLogger + jMetricsFactory := metrics.NullFactory + + // Initialize tracer with a logger and a metrics factory + closer, err := cfg.InitGlobalTracer( + "serviceName", + jaegercfg.Logger(jLogger), + jaegercfg.Metrics(jMetricsFactory), + ) + if err != nil { + log.Printf("Could not initialize jaeger tracer: %s", err.Error()) + return + } + defer closer.Close() + + // continue main() +} diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go index 99f3ca6bda..76486440ac 100644 --- a/vendor/github.com/uber/jaeger-client-go/config/options.go +++ b/vendor/github.com/uber/jaeger-client-go/config/options.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package config @@ -37,6 +31,7 @@ type Options struct { reporter jaeger.Reporter contribObservers []jaeger.ContribObserver observers []jaeger.Observer + gen128Bit bool zipkinSharedRPCSpan bool tags []opentracing.Tag } @@ -80,6 +75,13 @@ func ContribObserver(observer jaeger.ContribObserver) Option { } } +// Gen128Bit specifies whether to generate 128bit trace IDs. +func Gen128Bit(gen128Bit bool) Option { + return func(c *Options) { + c.gen128Bit = gen128Bit + } +} + // ZipkinSharedRPCSpan creates an option that enables sharing span ID between client // and server spans a la zipkin. If false, client and server spans will be assigned // different IDs. diff --git a/vendor/github.com/uber/jaeger-client-go/config/options_test.go b/vendor/github.com/uber/jaeger-client-go/config/options_test.go new file mode 100644 index 0000000000..01f338026a --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/config/options_test.go @@ -0,0 +1,72 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
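
The options.go hunk above adds the new Gen128Bit option, exercised below by TestApplyOptions and earlier by TestConfigWithGen128Bit. A minimal sketch of how a consumer of this vendored client could enable 128-bit trace IDs; the service name and const sampler are illustrative assumptions, not part of this diff:

    package main

    import (
        "log"

        "github.com/uber/jaeger-client-go"
        jaegercfg "github.com/uber/jaeger-client-go/config"
    )

    func main() {
        cfg := jaegercfg.Configuration{
            // Const sampler so the demo span is always sampled.
            Sampler: &jaegercfg.SamplerConfig{Type: jaeger.SamplerTypeConst, Param: 1},
        }
        // Gen128Bit(true) makes the tracer populate TraceID.High as well as Low.
        tracer, closer, err := cfg.New("example-service", jaegercfg.Gen128Bit(true))
        if err != nil {
            log.Fatalf("cannot initialize tracer: %v", err)
        }
        defer closer.Close()

        span := tracer.StartSpan("demo")
        traceID := span.Context().(jaeger.SpanContext).TraceID()
        span.Finish()
        // Both halves are non-zero, as TestConfigWithGen128Bit asserts.
        log.Printf("high=%x low=%x", traceID.High, traceID.Low)
    }

Without the option the tracer keeps the previous 64-bit behaviour, i.e. TraceID.High stays 0.
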
+ +package config + +import ( + "testing" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-lib/metrics" + + "github.com/uber/jaeger-client-go" +) + +func TestApplyOptions(t *testing.T) { + metricsFactory := metrics.NewLocalFactory(0) + observer := fakeObserver{} + contribObserver := fakeContribObserver{} + opts := applyOptions( + Metrics(metricsFactory), + Logger(jaeger.StdLogger), + Observer(observer), + ContribObserver(contribObserver), + Gen128Bit(true), + ZipkinSharedRPCSpan(true), + ) + assert.Equal(t, jaeger.StdLogger, opts.logger) + assert.Equal(t, metricsFactory, opts.metrics) + assert.Equal(t, []jaeger.Observer{observer}, opts.observers) + assert.Equal(t, []jaeger.ContribObserver{contribObserver}, opts.contribObservers) + assert.True(t, opts.gen128Bit) + assert.True(t, opts.zipkinSharedRPCSpan) +} + +func TestTraceTagOption(t *testing.T) { + c := Configuration{} + tracer, closer, err := c.New("test-service", Tag("tag-key", "tag-value")) + require.NoError(t, err) + defer closer.Close() + assert.Equal(t, opentracing.Tag{Key: "tag-key", Value: "tag-value"}, tracer.(*jaeger.Tracer).Tags()[0]) +} + +func TestApplyOptionsDefaults(t *testing.T) { + opts := applyOptions() + assert.Equal(t, jaeger.NullLogger, opts.logger) + assert.Equal(t, metrics.NullFactory, opts.metrics) +} + +type fakeObserver struct{} + +func (o fakeObserver) OnStartSpan(operationName string, options opentracing.StartSpanOptions) jaeger.SpanObserver { + return nil +} + +type fakeContribObserver struct{} + +func (o fakeContribObserver) OnStartSpan(span opentracing.Span, operationName string, options opentracing.StartSpanOptions) (jaeger.ContribSpanObserver, bool) { + return nil, false +} diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go index 65978b8be9..79c1375385 100644 --- a/vendor/github.com/uber/jaeger-client-go/constants.go +++ b/vendor/github.com/uber/jaeger-client-go/constants.go @@ -1,28 +1,22 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger const ( // JaegerClientVersion is the version of the client library reported as Span tag. - JaegerClientVersion = "Go-2.9.1dev" + JaegerClientVersion = "Go-2.11.0" // JaegerClientVersionTagKey is the name of the tag used to report client version. JaegerClientVersionTagKey = "jaeger.version" @@ -77,6 +71,6 @@ const ( SamplerTypeRateLimiting = "ratelimiting" // SamplerTypeLowerBound is the type of sampler that samples - // only up to a fixed number of traces per second. + // at least a fixed number of traces per second. SamplerTypeLowerBound = "lowerbound" ) diff --git a/vendor/github.com/uber/jaeger-client-go/constants_test.go b/vendor/github.com/uber/jaeger-client-go/constants_test.go new file mode 100644 index 0000000000..0beae1912b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/constants_test.go @@ -0,0 +1,29 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "strings" + "testing" +) + +func TestHeaderConstants(t *testing.T) { + if TraceContextHeaderName != strings.ToLower(TraceContextHeaderName) { + t.Errorf("TraceContextHeaderName is not lower-case: %+v", TraceContextHeaderName) + } + if TraceBaggageHeaderPrefix != strings.ToLower(TraceBaggageHeaderPrefix) { + t.Errorf("TraceBaggageHeaderPrefix is not lower-case: %+v", TraceBaggageHeaderPrefix) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/context.go b/vendor/github.com/uber/jaeger-client-go/context.go index d3dcf9b542..8b06173d98 100644 --- a/vendor/github.com/uber/jaeger-client-go/context.go +++ b/vendor/github.com/uber/jaeger-client-go/context.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/context_test.go b/vendor/github.com/uber/jaeger-client-go/context_test.go new file mode 100644 index 0000000000..34dfc74570 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/context_test.go @@ -0,0 +1,110 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
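
The context tests that follow drive ContextFromString, which parses the textual carrier format traceID:spanID:parentID:flags (all fields hex; a trace ID longer than 16 digits overflows into TraceID.High). A short sketch of the round trip the assertions rely on, with literal strings mirroring the test inputs:

    package main

    import (
        "fmt"

        "github.com/uber/jaeger-client-go"
    )

    func main() {
        // 17 hex digits: the leading digit lands in TraceID.High.
        ctx, err := jaeger.ContextFromString("10000000000000001:1:1:1")
        if err != nil {
            panic(err)
        }
        fmt.Println(ctx.IsSampled()) // true: flags bit 1 is set
        fmt.Println(ctx.String())    // round-trips to "10000000000000001:1:1:1"
    }
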
+ +package jaeger + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestContextFromString(t *testing.T) { + var err error + _, err = ContextFromString("") + assert.Error(t, err) + _, err = ContextFromString("abcd") + assert.Error(t, err) + _, err = ContextFromString("x:1:1:1") + assert.Error(t, err) + _, err = ContextFromString("1:x:1:1") + assert.Error(t, err) + _, err = ContextFromString("1:1:x:1") + assert.Error(t, err) + _, err = ContextFromString("1:1:1:x") + assert.Error(t, err) + _, err = ContextFromString("1:1:1:x") + assert.Error(t, err) + _, err = ContextFromString("01234567890123456789012345678901234:1:1:1") + assert.Error(t, err) + _, err = ContextFromString("01234567890123456789012345678901:1:1:1") + assert.NoError(t, err) + _, err = ContextFromString("01234_67890123456789012345678901:1:1:1") + assert.Error(t, err) + _, err = ContextFromString("0123456789012345678901_345678901:1:1:1") + assert.Error(t, err) + _, err = ContextFromString("1:0123456789012345:1:1") + assert.NoError(t, err) + _, err = ContextFromString("1:01234567890123456:1:1") + assert.Error(t, err) + ctx, err := ContextFromString("10000000000000001:1:1:1") + assert.NoError(t, err) + assert.EqualValues(t, TraceID{High: 1, Low: 1}, ctx.traceID) + ctx, err = ContextFromString("1:1:1:1") + assert.NoError(t, err) + assert.EqualValues(t, TraceID{Low: 1}, ctx.traceID) + assert.EqualValues(t, 1, ctx.spanID) + assert.EqualValues(t, 1, ctx.parentID) + assert.EqualValues(t, 1, ctx.flags) + ctx = NewSpanContext(TraceID{Low: 1}, 1, 1, true, nil) + assert.EqualValues(t, TraceID{Low: 1}, ctx.traceID) + assert.EqualValues(t, 1, ctx.spanID) + assert.EqualValues(t, 1, ctx.parentID) + assert.EqualValues(t, 1, ctx.flags) + assert.Equal(t, "ff", SpanID(255).String()) + assert.Equal(t, "ff", TraceID{Low: 255}.String()) + assert.Equal(t, "ff00000000000000ff", TraceID{High: 255, Low: 255}.String()) + ctx = NewSpanContext(TraceID{High: 255, Low: 255}, SpanID(1), SpanID(1), false, nil) + assert.Equal(t, "ff00000000000000ff:1:1:0", ctx.String()) +} + +func TestSpanContext_WithBaggageItem(t *testing.T) { + var ctx SpanContext + ctx = ctx.WithBaggageItem("some-KEY", "Some-Value") + assert.Equal(t, map[string]string{"some-KEY": "Some-Value"}, ctx.baggage) + ctx = ctx.WithBaggageItem("some-KEY", "Some-Other-Value") + assert.Equal(t, map[string]string{"some-KEY": "Some-Other-Value"}, ctx.baggage) +} + +func TestSpanContext_SampledDebug(t *testing.T) { + ctx, err := ContextFromString("1:1:1:1") + require.NoError(t, err) + assert.True(t, ctx.IsSampled()) + assert.False(t, ctx.IsDebug()) + + ctx, err = ContextFromString("1:1:1:3") + require.NoError(t, err) + assert.True(t, ctx.IsSampled()) + assert.True(t, ctx.IsDebug()) + + ctx, err = ContextFromString("1:1:1:0") + require.NoError(t, err) + assert.False(t, ctx.IsSampled()) + assert.False(t, ctx.IsDebug()) +} + +func TestSpanContext_CopyFrom(t *testing.T) { + ctx, err := ContextFromString("1:1:1:1") + require.NoError(t, err) + ctx2 := SpanContext{} + ctx2.CopyFrom(&ctx) + assert.Equal(t, ctx, ctx2) + // with baggage + ctx = ctx.WithBaggageItem("x", "y") + ctx2 = SpanContext{} + ctx2.CopyFrom(&ctx) + assert.Equal(t, ctx, ctx2) + assert.Equal(t, "y", ctx2.baggage["x"]) +} diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go index 2fe28d2613..4ce1881f3b 100644 --- a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go +++ 
b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go index 6a0ad6ad36..4f5549033d 100644 --- a/vendor/github.com/uber/jaeger-client-go/doc.go +++ b/vendor/github.com/uber/jaeger-client-go/doc.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. /* Package jaeger implements an OpenTracing (http://opentracing.io) Tracer. diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock index 84a832e05d..9ad7a140ae 100644 --- a/vendor/github.com/uber/jaeger-client-go/glide.lock +++ b/vendor/github.com/uber/jaeger-client-go/glide.lock @@ -1,30 +1,60 @@ -hash: 50e5f204fc6ee9ff66efb1275c7d12f78f8f21a2de98ed38c947d01eb2064b0f -updated: 2017-07-28T19:53:12.495163139-04:00 +hash: af5e193de27f73f5a8cef66ae2f0c013bcb9e48ecd69db4a595221f88ba99a71 +updated: 2017-11-21T19:49:03.248636345-05:00 imports: - name: github.com/apache/thrift version: b2a4d4ae21c789b689dd162deb819665567f481c subpackages: - lib/go/thrift +- name: github.com/beorn7/perks + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 + subpackages: + - quantile - name: github.com/codahale/hdrhistogram - version: 3a0bb77429bd3a61596f5e8a3172445844342120 + version: f8ad88b59a584afeee9d334eff879b104439117b - name: github.com/crossdock/crossdock-go version: 049aabb0122b03bc9bd30cab8f3f91fb60166361 subpackages: - assert - require - name: github.com/davecgh/go-spew - version: adab96458c51a58dc1783b3335dcce5461522e75 + version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 subpackages: - spew +- name: github.com/golang/protobuf + version: 7cc19b78d562895b13596ddce7aafb59dd789318 + subpackages: + - proto +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil - name: github.com/opentracing/opentracing-go version: 1949ddbfd147afd4d964a9f00b24eb291e0e7c38 subpackages: - ext - log - name: github.com/pmezard/go-difflib - version: 792786c7400a136282c1664665ae0a8db921c6c2 + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d subpackages: - difflib +- name: github.com/prometheus/client_golang + version: c5b7fccd204277076155f10851dad72b76a49317 + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: 6f3806018612930941127f2a7c6c453ba2c527d2 + subpackages: + - go +- name: github.com/prometheus/common + version: 49fee292b27bfff7f354ee0f64e1bc4850462edf + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: a1dba9ce8baed984a2495b658c82687f8157b98f + subpackages: + - xfs - name: github.com/stretchr/testify version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 subpackages: @@ -32,14 +62,17 @@ imports: - require - suite - name: github.com/uber-go/atomic - version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf + version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 + subpackages: + - utils - name: github.com/uber/jaeger-lib - version: e3c1d3b562900c6ac0a7ded654cb95d88e72b63e + version: c48167d9cae5887393dd5e61efd06a4a48b7fbb3 subpackages: - metrics + - metrics/prometheus - metrics/testutils - name: github.com/uber/tchannel-go - version: b99c1d7cecb0fdc882bed0098e7cae6ec7459059 + version: cc230a2942d078a8b01f4a79895dad62e6c572f1 subpackages: - atomic - internal/argreader @@ -52,18 +85,19 @@ imports: - trand - typed - name: go.uber.org/atomic - version: 4e336646b2ef9fc6e47be8e21594178f98e5ebcf + version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 +- name: go.uber.org/multierr + 
version: 3c4937480c32f4c13a875a1829af76c98ca3d40a - name: go.uber.org/zap - version: 6a4e056f2cc954cfec3581729e758909604b3f76 + version: 35aad584952c3e7020db7b839f6b102de6271f89 subpackages: - buffer - internal/bufferpool - internal/color - internal/exit - - internal/multierror - zapcore - name: golang.org/x/net - version: f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f + version: a337091b0525af65de94df2eb7e98bd9962dcbe2 subpackages: - bpf - context diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml index fd93b2473e..c0b6bcbfb3 100644 --- a/vendor/github.com/uber/jaeger-client-go/glide.yaml +++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml @@ -29,5 +29,9 @@ import: - suite - package: github.com/crossdock/crossdock-go - package: github.com/uber/jaeger-lib + version: ^1.2.1 subpackages: - metrics +testImport: +- package: github.com/prometheus/client_golang + version: v0.8.0 diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go index 0044186d2e..19c2c055b8 100644 --- a/vendor/github.com/uber/jaeger-client-go/header.go +++ b/vendor/github.com/uber/jaeger-client-go/header.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/header_test.go b/vendor/github.com/uber/jaeger-client-go/header_test.go new file mode 100644 index 0000000000..d6a481a84c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/header_test.go @@ -0,0 +1,50 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSetDefaultOrCustom(t *testing.T) { + assert.Equal(t, (&HeadersConfig{}).applyDefaults(), getDefaultHeadersConfig()) + assert.Equal(t, (&HeadersConfig{ + JaegerDebugHeader: "custom-jaeger-debug-header", + }).applyDefaults(), &HeadersConfig{ + JaegerDebugHeader: "custom-jaeger-debug-header", + JaegerBaggageHeader: JaegerBaggageHeader, + TraceContextHeaderName: TraceContextHeaderName, + TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix, + }) + + customHeaders := &HeadersConfig{ + JaegerDebugHeader: "custom-jaeger-debug-header", + JaegerBaggageHeader: "custom-jaeger-baggage-header", + TraceContextHeaderName: "custom-tracer-state-header-name", + TraceBaggageHeaderPrefix: "custom-tracer-baggage-header-prefix", + } + assert.Equal(t, customHeaders.applyDefaults(), customHeaders) +} + +func TestGetDefaultHeadersConfig(t *testing.T) { + assert.Equal(t, getDefaultHeadersConfig(), &HeadersConfig{ + JaegerDebugHeader: JaegerDebugHeader, + JaegerBaggageHeader: JaegerBaggageHeader, + TraceContextHeaderName: TraceContextHeaderName, + TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix, + }) +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go index 52e2c7b4f9..745729319f 100644 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package remote diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go index d17ca671f8..a56515acab 100644 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package remote @@ -96,7 +90,7 @@ func (m *RestrictionManager) isReady() bool { } // GetRestriction implements RestrictionManager#GetRestriction. -func (m *RestrictionManager) GetRestriction(key string) *baggage.Restriction { +func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction { m.mux.RLock() defer m.mux.RUnlock() if !m.initialized { diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager_test.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager_test.go new file mode 100644 index 0000000000..c0091eb44e --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager_test.go @@ -0,0 +1,220 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber-go/atomic" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/uber/jaeger-client-go" + "github.com/uber/jaeger-client-go/internal/baggage" + thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage" +) + +const ( + service = "svc" + expectedKey = "key" + expectedSize = 10 +) + +var ( + testRestrictions = []*thrift.BaggageRestriction{ + {BaggageKey: expectedKey, MaxValueLength: int32(expectedSize)}, + } +) + +var _ io.Closer = new(RestrictionManager) // API check + +type baggageHandler struct { + returnError *atomic.Bool + restrictions []*thrift.BaggageRestriction +} + +func (h *baggageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if h.returnError.Load() { + w.WriteHeader(http.StatusInternalServerError) + } else { + bytes, _ := json.Marshal(h.restrictions) + w.Header().Add("Content-Type", "application/json") + w.Write(bytes) + } +} + +func (h *baggageHandler) setReturnError(b bool) { + h.returnError.Store(b) +} + +func withHTTPServer( + restrictions []*thrift.BaggageRestriction, + f func( + metrics *jaeger.Metrics, + factory *metrics.LocalFactory, + handler *baggageHandler, + server *httptest.Server, + ), +) { + factory := metrics.NewLocalFactory(0) + m := jaeger.NewMetrics(factory, nil) + + handler := &baggageHandler{returnError: atomic.NewBool(true), restrictions: restrictions} + server := httptest.NewServer(handler) + defer server.Close() + + f(m, factory, handler, server) +} + +func TestNewRemoteRestrictionManager(t *testing.T) { + withHTTPServer( + testRestrictions, + func( + metrics *jaeger.Metrics, + factory *metrics.LocalFactory, + handler *baggageHandler, + server *httptest.Server, + ) { + handler.setReturnError(false) + mgr := NewRestrictionManager( + service, + Options.HostPort(getHostPort(t, server.URL)), + Options.Metrics(metrics), + Options.Logger(jaeger.NullLogger), + ) + defer mgr.Close() + + for i := 0; i < 100; i++ { + if mgr.isReady() { + break + } + time.Sleep(time.Millisecond) + } + require.True(t, mgr.isReady()) + + restriction := mgr.GetRestriction(service, expectedKey) + assert.EqualValues(t, baggage.NewRestriction(true, expectedSize), restriction) + + badKey := "bad-key" + restriction = mgr.GetRestriction(service, badKey) + assert.EqualValues(t, baggage.NewRestriction(false, 0), restriction) + + testutils.AssertCounterMetrics(t, factory, + testutils.ExpectedMetric{ + Name: "jaeger.baggage_restrictions_updates", + Tags: map[string]string{"result": "ok"}, + Value: 1, + }, + ) + }) +} + +func TestDenyBaggageOnInitializationFailure(t *testing.T) { + withHTTPServer( + testRestrictions, + func( + m *jaeger.Metrics, + factory *metrics.LocalFactory, + handler *baggageHandler, + server *httptest.Server, + ) { + mgr := NewRestrictionManager( + service, + Options.DenyBaggageOnInitializationFailure(true), + Options.HostPort(getHostPort(t, server.URL)), + 
Options.Metrics(m), + Options.Logger(jaeger.NullLogger), + ) + require.False(t, mgr.isReady()) + + metricName := "jaeger.baggage_restrictions_updates" + metricTags := map[string]string{"result": "err"} + key := metrics.GetKey(metricName, metricTags, "|", "=") + for i := 0; i < 100; i++ { + // wait until the async initialization call is complete + counters, _ := factory.Snapshot() + if _, ok := counters[key]; ok { + break + } + time.Sleep(time.Millisecond) + } + + testutils.AssertCounterMetrics(t, factory, + testutils.ExpectedMetric{ + Name: metricName, + Tags: metricTags, + Value: 1, + }, + ) + + // DenyBaggageOnInitializationFailure should not allow any key to be written + restriction := mgr.GetRestriction(service, expectedKey) + assert.EqualValues(t, baggage.NewRestriction(false, 0), restriction) + + // have the http server return restrictions + handler.setReturnError(false) + mgr.updateRestrictions() + + // Wait until manager retrieves baggage restrictions + for i := 0; i < 100; i++ { + if mgr.isReady() { + break + } + time.Sleep(time.Millisecond) + } + require.True(t, mgr.isReady()) + + restriction = mgr.GetRestriction(service, expectedKey) + assert.EqualValues(t, baggage.NewRestriction(true, expectedSize), restriction) + }) +} + +func TestAllowBaggageOnInitializationFailure(t *testing.T) { + withHTTPServer( + testRestrictions, + func( + metrics *jaeger.Metrics, + factory *metrics.LocalFactory, + handler *baggageHandler, + server *httptest.Server, + ) { + mgr := NewRestrictionManager( + service, + Options.RefreshInterval(time.Millisecond), + Options.HostPort(getHostPort(t, server.URL)), + Options.Metrics(metrics), + Options.Logger(jaeger.NullLogger), + ) + require.False(t, mgr.isReady()) + + // AllowBaggageOnInitializationFailure should allow any key to be written + restriction := mgr.GetRestriction(service, expectedKey) + assert.EqualValues(t, baggage.NewRestriction(true, 2048), restriction) + }) +} + +func getHostPort(t *testing.T, s string) string { + u, err := url.Parse(s) + require.NoError(t, err, "Failed to parse url") + return u.Host +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go index a3be272c31..c16a5c5662 100644 --- a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
+// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package baggage @@ -48,9 +42,12 @@ func (r *Restriction) MaxValueLength() int { return r.maxValueLength } -// RestrictionManager keeps track of valid baggage keys and their restrictions. +// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager +// will return a Restriction for a specific baggage key which will determine whether the baggage +// key is allowed for the current service and any other applicable restrictions on the baggage +// value. type RestrictionManager interface { - GetRestriction(key string) *Restriction + GetRestriction(service, key string) *Restriction } // DefaultRestrictionManager allows any baggage key. @@ -69,6 +66,6 @@ func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager } // GetRestriction implements RestrictionManager#GetRestriction. -func (m *DefaultRestrictionManager) GetRestriction(key string) *Restriction { +func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction { return m.defaultRestriction } diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager_test.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager_test.go new file mode 100644 index 0000000000..b91d866d5e --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager_test.go @@ -0,0 +1,29 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
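
The restriction_manager.go hunk above widens RestrictionManager#GetRestriction from GetRestriction(key) to GetRestriction(service, key), with DefaultRestrictionManager and the remote manager updated to match. A hedged sketch of what a custom implementation must look like against this vendored version; PrefixRestrictionManager is a made-up name for illustration:

    package baggage

    import "strings"

    // PrefixRestrictionManager permits only baggage keys carrying a fixed
    // prefix. This toy manager ignores the new service argument, but
    // satisfying the two-argument signature is now mandatory.
    type PrefixRestrictionManager struct {
        prefix         string
        maxValueLength int
    }

    var _ RestrictionManager = &PrefixRestrictionManager{}

    func (m *PrefixRestrictionManager) GetRestriction(service, key string) *Restriction {
        if strings.HasPrefix(key, m.prefix) {
            return NewRestriction(true, m.maxValueLength)
        }
        return NewRestriction(false, 0)
    }
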
+ +package baggage + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var _ RestrictionManager = &DefaultRestrictionManager{} + +func TestDefaultRestrictionManager(t *testing.T) { + mgr := NewDefaultRestrictionManager(0) + restriction := mgr.GetRestriction("svc", "key") + assert.EqualValues(t, NewRestriction(true, 2048), restriction) +} diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go index 283deb1ab5..0e10b8a5aa 100644 --- a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go +++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package spanlog diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go index 23becc2c7b..8402d087c2 100644 --- a/vendor/github.com/uber/jaeger-client-go/interop.go +++ b/vendor/github.com/uber/jaeger-client-go/interop.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go index 63a7db7b58..868b2a5b54 100644 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go index 3335b5c89c..338a345822 100644 --- a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span_test.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span_test.go new file mode 100644 index 0000000000..995d0ddbac --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span_test.go @@ -0,0 +1,388 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
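
The tests below pin down how BuildJaegerThrift converts a finished *Span, its tags, and its log fields into the thrift-gen representation. A small sketch of driving that conversion directly, using only constructors that appear in these tests; the service and tag values are placeholders:

    package main

    import (
        "fmt"

        "github.com/opentracing/opentracing-go/ext"
        "github.com/uber/jaeger-client-go"
    )

    func main() {
        tracer, closer := jaeger.NewTracer("demo",
            jaeger.NewConstSampler(true),
            jaeger.NewNullReporter())
        defer closer.Close()

        sp := tracer.StartSpan("op").(*jaeger.Span)
        ext.PeerService.Set(sp, "backend")
        sp.Finish()

        // Every OpenTracing tag becomes a typed thrift Tag, as the
        // table-driven cases below assert.
        for _, tag := range jaeger.BuildJaegerThrift(sp).Tags {
            fmt.Println(tag.Key, tag.VType)
        }
    }
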
+ +package jaeger + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" + "github.com/uber/jaeger-client-go/utils" +) + +var ( + someString = "str" + someBool = true + someLong = int64(123) + someDouble = float64(123) + someBinary = []byte("hello") + someSlice = []string{"a"} + someSliceString = "[a]" +) + +func TestBuildJaegerThrift(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + sp1 := tracer.StartSpan("sp1").(*Span) + ext.SpanKindRPCServer.Set(sp1) + ext.PeerService.Set(sp1, "svc") + sp2 := tracer.StartSpan("sp2", opentracing.ChildOf(sp1.Context())).(*Span) + ext.SpanKindRPCClient.Set(sp2) + sp2.Finish() + sp1.Finish() + + jaegerSpan1 := BuildJaegerThrift(sp1) + jaegerSpan2 := BuildJaegerThrift(sp2) + assert.Equal(t, "sp1", jaegerSpan1.OperationName) + assert.Equal(t, "sp2", jaegerSpan2.OperationName) + assert.EqualValues(t, 0, jaegerSpan1.ParentSpanId) + assert.Equal(t, jaegerSpan1.SpanId, jaegerSpan2.ParentSpanId) + assert.Len(t, jaegerSpan1.Tags, 4) + tag := findTag(jaegerSpan1, SamplerTypeTagKey) + assert.Equal(t, SamplerTypeConst, *tag.VStr) + tag = findTag(jaegerSpan1, string(ext.SpanKind)) + assert.Equal(t, string(ext.SpanKindRPCServerEnum), *tag.VStr) + tag = findTag(jaegerSpan1, string(ext.PeerService)) + assert.Equal(t, "svc", *tag.VStr) + assert.Empty(t, jaegerSpan1.References) + + assert.Len(t, jaegerSpan2.References, 1) + assert.Equal(t, j.SpanRefType_CHILD_OF, jaegerSpan2.References[0].RefType) + assert.EqualValues(t, jaegerSpan1.TraceIdLow, jaegerSpan2.References[0].TraceIdLow) + assert.EqualValues(t, jaegerSpan1.TraceIdHigh, jaegerSpan2.References[0].TraceIdHigh) + assert.EqualValues(t, jaegerSpan1.SpanId, jaegerSpan2.References[0].SpanId) + tag = findTag(jaegerSpan2, string(ext.SpanKind)) + assert.Equal(t, string(ext.SpanKindRPCClientEnum), *tag.VStr) +} + +func TestBuildJaegerProcessThrift(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + sp := tracer.StartSpan("sp1").(*Span) + sp.Finish() + + process := BuildJaegerProcessThrift(sp) + assert.Equal(t, process.ServiceName, "DOOP") + require.Len(t, process.Tags, 3) + assert.NotNil(t, findJaegerTag("jaeger.version", process.Tags)) + assert.NotNil(t, findJaegerTag("hostname", process.Tags)) + assert.NotNil(t, findJaegerTag("ip", process.Tags)) +} + +func TestBuildLogs(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + root := tracer.StartSpan("s1") + + someTime := time.Now().Add(-time.Minute) + someTimeInt64 := utils.TimeToMicrosecondsSinceEpochInt64(someTime) + + errString := "error" + + tests := []struct { + field log.Field + logFunc func(sp opentracing.Span) + expected []*j.Tag + expectedTimestamp int64 + disableSampling bool + }{ + {field: log.String("event", someString), expected: []*j.Tag{{Key: "event", VType: j.TagType_STRING, VStr: &someString}}}, + {field: log.String("k", someString), expected: []*j.Tag{{Key: "k", VType: j.TagType_STRING, VStr: &someString}}}, + {field: log.Bool("k", someBool), expected: []*j.Tag{{Key: "k", VType: j.TagType_BOOL, VBool: &someBool}}}, + {field: log.Int("k", 123), expected: []*j.Tag{{Key: 
"k", VType: j.TagType_LONG, VLong: &someLong}}}, + {field: log.Int32("k", 123), expected: []*j.Tag{{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}}, + {field: log.Int64("k", 123), expected: []*j.Tag{{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}}, + {field: log.Uint32("k", 123), expected: []*j.Tag{{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}}, + {field: log.Uint64("k", 123), expected: []*j.Tag{{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}}, + {field: log.Float32("k", 123), expected: []*j.Tag{{Key: "k", VType: j.TagType_DOUBLE, VDouble: &someDouble}}}, + {field: log.Float64("k", 123), expected: []*j.Tag{{Key: "k", VType: j.TagType_DOUBLE, VDouble: &someDouble}}}, + {field: log.Error(errors.New(errString)), expected: []*j.Tag{{Key: "error", VType: j.TagType_STRING, VStr: &errString}}}, + {field: log.Object("k", someSlice), expected: []*j.Tag{{Key: "k", VType: j.TagType_STRING, VStr: &someSliceString}}}, + { + field: log.Lazy(func(fv log.Encoder) { + fv.EmitBool("k", someBool) + }), + expected: []*j.Tag{{Key: "k", VType: j.TagType_BOOL, VBool: &someBool}}, + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogKV("event", someString) + }, + expected: []*j.Tag{{Key: "event", VType: j.TagType_STRING, VStr: &someString}}, + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogKV("non-even number of arguments") + }, + // this is a bit fragile, but ¯\_(ツ)_/¯ + expected: []*j.Tag{ + {Key: "error", VType: j.TagType_STRING, VStr: getStringPtr("non-even keyValues len: 1")}, + {Key: "function", VType: j.TagType_STRING, VStr: getStringPtr("LogKV")}, + }, + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogEvent(someString) + }, + expected: []*j.Tag{{Key: "event", VType: j.TagType_STRING, VStr: &someString}}, + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogEventWithPayload(someString, "payload") + }, + expected: []*j.Tag{ + {Key: "event", VType: j.TagType_STRING, VStr: &someString}, + {Key: "payload", VType: j.TagType_STRING, VStr: getStringPtr("payload")}, + }, + }, + { + logFunc: func(sp opentracing.Span) { + sp.Log(opentracing.LogData{Event: someString}) + }, + expected: []*j.Tag{{Key: "event", VType: j.TagType_STRING, VStr: &someString}}, + }, + { + logFunc: func(sp opentracing.Span) { + sp.Log(opentracing.LogData{Event: someString, Payload: "payload"}) + }, + expected: []*j.Tag{ + {Key: "event", VType: j.TagType_STRING, VStr: &someString}, + {Key: "payload", VType: j.TagType_STRING, VStr: getStringPtr("payload")}, + }, + }, + { + logFunc: func(sp opentracing.Span) { + sp.FinishWithOptions(opentracing.FinishOptions{ + LogRecords: []opentracing.LogRecord{ + { + Timestamp: someTime, + Fields: []log.Field{log.String("event", someString)}, + }, + }, + }) + }, + expected: []*j.Tag{{Key: "event", VType: j.TagType_STRING, VStr: &someString}}, + expectedTimestamp: someTimeInt64, + }, + { + logFunc: func(sp opentracing.Span) { + sp.FinishWithOptions(opentracing.FinishOptions{ + BulkLogData: []opentracing.LogData{ + { + Timestamp: someTime, + Event: someString, + }, + }, + }) + }, + expected: []*j.Tag{{Key: "event", VType: j.TagType_STRING, VStr: &someString}}, + expectedTimestamp: someTimeInt64, + }, + { + logFunc: func(sp opentracing.Span) { + sp.FinishWithOptions(opentracing.FinishOptions{ + BulkLogData: []opentracing.LogData{ + { + Timestamp: someTime, + Event: someString, + Payload: "payload", + }, + }, + }) + }, + expected: []*j.Tag{ + {Key: "event", VType: j.TagType_STRING, VStr: &someString}, + {Key: "payload", VType: j.TagType_STRING, VStr: 
getStringPtr("payload")}, + }, + expectedTimestamp: someTimeInt64, + }, + { + disableSampling: true, + field: log.String("event", someString), + }, + { + disableSampling: true, + logFunc: func(sp opentracing.Span) { + sp.LogKV("event", someString) + }, + }, + } + for i, test := range tests { + testName := fmt.Sprintf("test-%02d", i) + sp := tracer.StartSpan(testName, opentracing.ChildOf(root.Context())) + if test.disableSampling { + ext.SamplingPriority.Set(sp, 0) + } + if test.logFunc != nil { + test.logFunc(sp) + } else if test.field != (log.Field{}) { + sp.LogFields(test.field) + } + jaegerSpan := BuildJaegerThrift(sp.(*Span)) + if test.disableSampling { + assert.Equal(t, 0, len(jaegerSpan.Logs), testName) + continue + } + assert.Equal(t, 1, len(jaegerSpan.Logs), testName) + compareTagSlices(t, test.expected, jaegerSpan.Logs[0].GetFields(), testName) + if test.expectedTimestamp != 0 { + assert.Equal(t, test.expectedTimestamp, jaegerSpan.Logs[0].Timestamp, testName) + } + } +} + +func TestBuildTags(t *testing.T) { + tests := []struct { + tag Tag + expected *j.Tag + }{ + {tag: Tag{key: "k", value: someString}, expected: &j.Tag{Key: "k", VType: j.TagType_STRING, VStr: &someString}}, + {tag: Tag{key: "k", value: int(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: uint(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: int8(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: uint8(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: int16(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: uint16(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: int32(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: uint32(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: int64(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: uint64(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_LONG, VLong: &someLong}}, + {tag: Tag{key: "k", value: float32(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_DOUBLE, VDouble: &someDouble}}, + {tag: Tag{key: "k", value: float64(123)}, expected: &j.Tag{Key: "k", VType: j.TagType_DOUBLE, VDouble: &someDouble}}, + {tag: Tag{key: "k", value: someBool}, expected: &j.Tag{Key: "k", VType: j.TagType_BOOL, VBool: &someBool}}, + {tag: Tag{key: "k", value: someBinary}, expected: &j.Tag{Key: "k", VType: j.TagType_BINARY, VBinary: someBinary}}, + {tag: Tag{key: "k", value: someSlice}, expected: &j.Tag{Key: "k", VType: j.TagType_STRING, VStr: &someSliceString}}, + } + for i, test := range tests { + testName := fmt.Sprintf("test-%02d", i) + actual := buildTags([]Tag{test.tag}) + assert.Len(t, actual, 1) + compareTags(t, test.expected, actual[0], testName) + } +} + +func TestBuildReferences(t *testing.T) { + references := []Reference{ + {Type: opentracing.ChildOfRef, Context: SpanContext{traceID: TraceID{High: 1, Low: 1}, spanID: SpanID(1)}}, + {Type: opentracing.FollowsFromRef, Context: SpanContext{traceID: TraceID{High: 2, Low: 2}, spanID: SpanID(2)}}, + } + spanRefs := buildReferences(references) + assert.Len(t, spanRefs, 2) + assert.Equal(t, j.SpanRefType_CHILD_OF, spanRefs[0].RefType) + 
assert.EqualValues(t, 1, spanRefs[0].SpanId) + assert.EqualValues(t, 1, spanRefs[0].TraceIdHigh) + assert.EqualValues(t, 1, spanRefs[0].TraceIdLow) + + assert.Equal(t, j.SpanRefType_FOLLOWS_FROM, spanRefs[1].RefType) + assert.EqualValues(t, 2, spanRefs[1].SpanId) + assert.EqualValues(t, 2, spanRefs[1].TraceIdHigh) + assert.EqualValues(t, 2, spanRefs[1].TraceIdLow) +} + +func TestJaegerSpanBaggageLogs(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + sp := tracer.StartSpan("s1").(*Span) + sp.SetBaggageItem("auth.token", "token") + ext.SpanKindRPCServer.Set(sp) + sp.Finish() + + jaegerSpan := BuildJaegerThrift(sp) + require.Len(t, jaegerSpan.Logs, 1) + fields := jaegerSpan.Logs[0].Fields + require.Len(t, fields, 3) + assertJaegerTag(t, fields, "event", "baggage") + assertJaegerTag(t, fields, "key", "auth.token") + assertJaegerTag(t, fields, "value", "token") +} + +func assertJaegerTag(t *testing.T, tags []*j.Tag, key string, value string) { + tag := findJaegerTag(key, tags) + require.NotNil(t, tag) + assert.Equal(t, value, tag.GetVStr()) +} + +func getStringPtr(s string) *string { + return &s +} + +func findTag(span *j.Span, key string) *j.Tag { + for _, s := range span.Tags { + if s.Key == key { + return s + } + } + return nil +} + +func findJaegerTag(key string, tags []*j.Tag) *j.Tag { + for _, tag := range tags { + if tag.Key == key { + return tag + } + } + return nil +} + +func compareTagSlices(t *testing.T, expectedTags, actualTags []*j.Tag, testName string) { + assert.Equal(t, len(expectedTags), len(actualTags)) + for _, expectedTag := range expectedTags { + actualTag := findJaegerTag(expectedTag.Key, actualTags) + compareTags(t, expectedTag, actualTag, testName) + } +} + +func compareTags(t *testing.T, expected, actual *j.Tag, testName string) { + if expected == nil && actual == nil { + return + } + if expected == nil || actual == nil { + assert.Fail(t, "one of the tags is nil", testName) + return + } + assert.Equal(t, expected.Key, actual.Key, testName) + assert.Equal(t, expected.VType, actual.VType, testName) + switch expected.VType { + case j.TagType_STRING: + assert.Equal(t, *expected.VStr, *actual.VStr, testName) + case j.TagType_LONG: + assert.Equal(t, *expected.VLong, *actual.VLong, testName) + case j.TagType_DOUBLE: + assert.Equal(t, *expected.VDouble, *actual.VDouble, testName) + case j.TagType_BOOL: + assert.Equal(t, *expected.VBool, *actual.VBool, testName) + case j.TagType_BINARY: + assert.Equal(t, expected.VBinary, actual.VBinary, testName) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go index 3ac1bac488..3e2d98869e 100644 --- a/vendor/github.com/uber/jaeger-client-go/log/logger.go +++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package log diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger_test.go b/vendor/github.com/uber/jaeger-client-go/log/logger_test.go new file mode 100644 index 0000000000..a20580a652 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/log/logger_test.go @@ -0,0 +1,26 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package log + +import ( + "testing" +) + +func TestLogger(t *testing.T) { + for _, logger := range []Logger{StdLogger, NullLogger} { + logger.Infof("Hi %s", "there") + logger.Error("Bad wolf") + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go index 52b5dffd33..d4f0b50192 100644 --- a/vendor/github.com/uber/jaeger-client-go/logger.go +++ b/vendor/github.com/uber/jaeger-client-go/logger.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/logger_test.go b/vendor/github.com/uber/jaeger-client-go/logger_test.go new file mode 100644 index 0000000000..519c86a8a6 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/logger_test.go @@ -0,0 +1,40 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "testing" + + "github.com/uber/jaeger-client-go/log" +) + +func TestLogger(t *testing.T) { + for _, logger := range []Logger{StdLogger, NullLogger} { + logger.Infof("Hi %s", "there") + logger.Error("Bad wolf") + } +} + +func TestCompatibility(t *testing.T) { + for _, logger := range []log.Logger{StdLogger, NullLogger} { + logger.Infof("Hi %s", "there") + logger.Error("Bad wolf") + } + + for _, logger := range []Logger{log.StdLogger, log.NullLogger} { + logger.Infof("Hi %s", "there") + logger.Error("Bad wolf") + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go index 7a976c6ad0..8c0e63213f 100644 --- a/vendor/github.com/uber/jaeger-client-go/metrics.go +++ b/vendor/github.com/uber/jaeger-client-go/metrics.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger @@ -39,63 +33,61 @@ type Metrics struct { TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n"` // Number of sampled spans started by this tracer - SpansStarted metrics.Counter `metric:"spans" tags:"group=lifecycle,state=started"` + SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y"` - // Number of sampled spans finished by this tracer - SpansFinished metrics.Counter `metric:"spans" tags:"group=lifecycle,state=finished"` + // Number of unsampled spans started by this tracer + SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n"` - // Number of sampled spans started by this tracer - SpansSampled metrics.Counter `metric:"spans" tags:"group=sampling,sampled=y"` - - // Number of not-sampled spans started by this tracer - SpansNotSampled metrics.Counter `metric:"spans" tags:"group=sampling,sampled=n"` + // Number of spans finished by this tracer + SpansFinished metrics.Counter `metric:"finished_spans"` // Number of errors decoding tracing context - DecodingErrors metrics.Counter `metric:"decoding-errors"` + DecodingErrors metrics.Counter `metric:"span_context_decoding_errors"` // Number of spans successfully reported - ReporterSuccess metrics.Counter `metric:"reporter-spans" tags:"state=success"` + ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok"` - // Number of spans in failed attempts to report - ReporterFailure metrics.Counter `metric:"reporter-spans" tags:"state=failure"` + // Number of spans not reported due to a Sender failure + ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err"` // Number of spans dropped due to internal queue overflow - ReporterDropped metrics.Counter `metric:"reporter-spans" tags:"state=dropped"` + ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped"` // Current number of spans in the 
reporter queue - ReporterQueueLength metrics.Gauge `metric:"reporter-queue"` + ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length"` // Number of times the Sampler succeeded to retrieve sampling strategy - SamplerRetrieved metrics.Counter `metric:"sampler" tags:"state=retrieved"` + SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok"` + + // Number of times the Sampler failed to retrieve sampling strategy + SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err"` // Number of times the Sampler succeeded to retrieve and update sampling strategy - SamplerUpdated metrics.Counter `metric:"sampler" tags:"state=updated"` + SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok"` // Number of times the Sampler failed to update sampling strategy - SamplerUpdateFailure metrics.Counter `metric:"sampler" tags:"state=failure,phase=updating"` - - // Number of times the Sampler failed to retrieve sampling strategy - SamplerQueryFailure metrics.Counter `metric:"sampler" tags:"state=failure,phase=query"` + SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err"` // Number of times baggage was successfully written or updated on spans. - BaggageUpdateSuccess metrics.Counter `metric:"baggage-update" tags:"result=ok"` + BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok"` // Number of times baggage failed to write or update on spans. - BaggageUpdateFailure metrics.Counter `metric:"baggage-update" tags:"result=err"` + BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err"` // Number of times baggage was truncated as per baggage restrictions. - BaggageTruncate metrics.Counter `metric:"baggage-truncate"` + BaggageTruncate metrics.Counter `metric:"baggage_truncations"` // Number of times baggage restrictions were successfully updated. - BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage-restrictions-update" tags:"result=ok"` + BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok"` // Number of times baggage restrictions failed to update. - BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage-restrictions-update" tags:"result=err"` + BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err"` } // NewMetrics creates a new Metrics struct and initializes it. func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics { m := &Metrics{} + // TODO the namespace "jaeger" should be configurable (e.g. in all-in-one "jaeger-client" would make more sense) metrics.Init(m, factory.Namespace("jaeger", nil), globalTags) return m } diff --git a/vendor/github.com/uber/jaeger-client-go/metrics_test.go b/vendor/github.com/uber/jaeger-client-go/metrics_test.go new file mode 100644 index 0000000000..09dbb3728a --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/metrics_test.go @@ -0,0 +1,48 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" +) + +func TestNewMetrics(t *testing.T) { + factory := metrics.NewLocalFactory(0) + m := NewMetrics(factory, map[string]string{"lib": "jaeger"}) + + require.NotNil(t, m.SpansStartedSampled, "counter not initialized") + require.NotNil(t, m.ReporterQueueLength, "gauge not initialized") + + m.SpansStartedSampled.Inc(1) + m.ReporterQueueLength.Update(11) + testutils.AssertCounterMetrics(t, factory, + testutils.ExpectedMetric{ + Name: "jaeger.started_spans", + Tags: map[string]string{"lib": "jaeger", "sampled": "y"}, + Value: 1, + }, + ) + testutils.AssertGaugeMetrics(t, factory, + testutils.ExpectedMetric{ + Name: "jaeger.reporter_queue_length", + Tags: map[string]string{"lib": "jaeger"}, + Value: 11, + }, + ) +} diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go index 7eeac5369d..7bbd028897 100644 --- a/vendor/github.com/uber/jaeger-client-go/observer.go +++ b/vendor/github.com/uber/jaeger-client-go/observer.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/observer_test.go b/vendor/github.com/uber/jaeger-client-go/observer_test.go new file mode 100644 index 0000000000..84c8d0b9dd --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/observer_test.go @@ -0,0 +1,109 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "testing" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" +) + +func TestEmptyObserver(t *testing.T) { + tracer, closer := NewTracer("test", NewConstSampler(true), NewInMemoryReporter()) + defer closer.Close() + s := tracer.StartSpan("test", ext.RPCServerOption(nil)) + s.Finish() + assert.Equal(t, s.(*Span).observer, noopSpanObserver) +} + +func TestObservers(t *testing.T) { + tracer, closer := NewTracer( + "test", + NewConstSampler(true), + NewInMemoryReporter(), + TracerOptions.Observer(testObserver{}), + TracerOptions.Observer(testObserver{}), + ) + defer closer.Close() + + s := tracer.StartSpan("test", ext.RPCServerOption(nil)) + + forEachObs := func(f func(so *testSpanObserver)) { + observers := s.(*Span).observer.(*compositeSpanObserver).observers + assert.Len(t, observers, 2) + for _, so := range observers { + f(so.(*testSpanObserver)) + } + } + + forEachObs(func(so *testSpanObserver) { + assert.Equal(t, testSpanObserver{ + operationName: "test", + tags: map[string]interface{}{ + "span.kind": ext.SpanKindRPCServerEnum, + }, + }, *so) + }) + + s.SetOperationName("test2") + s.SetTag("bender", "rodriguez") + forEachObs(func(so *testSpanObserver) { + assert.Equal(t, testSpanObserver{ + operationName: "test2", + tags: map[string]interface{}{ + "span.kind": ext.SpanKindRPCServerEnum, + "bender": "rodriguez", + }, + }, *so) + }) + + s.Finish() + forEachObs(func(so *testSpanObserver) { + assert.True(t, so.finished) + }) +} + +type testObserver struct{} + +type testSpanObserver struct { + operationName string + tags map[string]interface{} + finished bool +} + +func (o testObserver) OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver { + tags := make(map[string]interface{}) + for k, v := range options.Tags { + tags[k] = v + } + return &testSpanObserver{ + operationName: operationName, + tags: tags, + } +} + +func (o *testSpanObserver) OnSetOperationName(operationName string) { + o.operationName = operationName +} + +func (o *testSpanObserver) OnSetTag(key string, value interface{}) { + o.tags[key] = value +} + +func (o *testSpanObserver) OnFinish(options opentracing.FinishOptions) { + o.finished = true +} diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go index 30ddc84512..abca67a3c9 100644 --- a/vendor/github.com/uber/jaeger-client-go/propagation.go +++ b/vendor/github.com/uber/jaeger-client-go/propagation.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/propagation_test.go b/vendor/github.com/uber/jaeger-client-go/propagation_test.go new file mode 100644 index 0000000000..08724db526 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/propagation_test.go @@ -0,0 +1,267 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
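+ +// These tests round-trip span contexts through Inject/Extract across the +// supported carrier formats, and verify baggage propagation, decoding-error +// metrics, and the Jaeger debug and baggage headers.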
+ +package jaeger + +import ( + "bytes" + "net/http" + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" +) + +func initMetrics() (*metrics.LocalFactory, *Metrics) { + factory := metrics.NewLocalFactory(0) + return factory, NewMetrics(factory, nil) +} + +func TestSpanPropagator(t *testing.T) { + const op = "test" + reporter := NewInMemoryReporter() + metricsFactory, metrics := initMetrics() + tracer, closer := NewTracer("x", NewConstSampler(true), reporter, TracerOptions.Metrics(metrics), TracerOptions.ZipkinSharedRPCSpan(true)) + + mapc := opentracing.TextMapCarrier(make(map[string]string)) + httpc := opentracing.HTTPHeadersCarrier(http.Header{}) + tests := []struct { + format, carrier, formatName interface{} + }{ + {SpanContextFormat, new(SpanContext), "TraceContextFormat"}, + {opentracing.Binary, new(bytes.Buffer), "Binary"}, + {opentracing.TextMap, mapc, "TextMap"}, + {opentracing.HTTPHeaders, httpc, "HTTPHeaders"}, + } + + sp := tracer.StartSpan(op) + sp.SetTag("x", "y") // to avoid later comparing nil vs. [] + sp.SetBaggageItem("foo", "bar") + for _, test := range tests { + // starting normal child to extract its serialized context + child := tracer.StartSpan(op, opentracing.ChildOf(sp.Context())) + err := tracer.Inject(child.Context(), test.format, test.carrier) + assert.NoError(t, err) + // Note: we're not finishing the above span + childCtx, err := tracer.Extract(test.format, test.carrier) + assert.NoError(t, err) + child = tracer.StartSpan(test.formatName.(string), ext.RPCServerOption(childCtx)) + child.SetTag("x", "y") // to avoid later comparing nil vs. [] + child.Finish() + } + sp.Finish() + closer.Close() + + otSpans := reporter.GetSpans() + require.Equal(t, len(tests)+1, len(otSpans), "unexpected number of spans reported") + + spans := make([]*Span, len(otSpans)) + for i, s := range otSpans { + spans[i] = s.(*Span) + } + + // The last span is the original one. + exp, spans := spans[len(spans)-1], spans[:len(spans)-1] + exp.duration = time.Duration(123) + exp.startTime = time.Time{}.Add(1) + require.Len(t, exp.logs, 1) // The parent span should have baggage logs + fields := exp.logs[0].Fields + require.Len(t, fields, 3) + require.Equal(t, "event", fields[0].Key()) + require.Equal(t, "baggage", fields[0].Value().(string)) + require.Equal(t, "key", fields[1].Key()) + require.Equal(t, "foo", fields[1].Value().(string)) + require.Equal(t, "value", fields[2].Key()) + require.Equal(t, "bar", fields[2].Value().(string)) + + if exp.context.ParentID() != 0 { + t.Fatalf("Root span's ParentID %d is not 0", exp.context.ParentID()) + } + + expTags := exp.tags[2:] // skip two sampler.xxx tags + for i, sp := range spans { + formatName := sp.operationName + if a, e := sp.context.ParentID(), exp.context.SpanID(); a != e { + t.Fatalf("%d: ParentID %d does not match expectation %d", i, a, e) + } else { + // Prepare for comparison.
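+ // IDs and timings differ per span by construction, so they are overwritten + // with the root span's values; the assertions below then compare the rest.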
+ sp.context.spanID, sp.context.parentID = exp.context.SpanID(), 0 + sp.duration, sp.startTime = exp.duration, exp.startTime + } + assert.Equal(t, exp.context, sp.context, formatName) + assert.Equal(t, "span.kind", sp.tags[0].key) + assert.Equal(t, expTags, sp.tags[1:] /*skip span.kind tag*/, formatName) + assert.Empty(t, sp.logs, formatName) + // Override collections to avoid tripping comparison on different pointers + sp.context = exp.context + sp.tags = exp.tags + sp.logs = exp.logs + sp.operationName = op + sp.references = exp.references + // Compare the rest of the fields + assert.Equal(t, exp, sp, formatName) + } + + testutils.AssertCounterMetrics(t, metricsFactory, []testutils.ExpectedMetric{ + {Name: "jaeger.started_spans", Tags: map[string]string{"sampled": "y"}, Value: 1 + 2*len(tests)}, + {Name: "jaeger.finished_spans", Value: 1 + len(tests)}, + {Name: "jaeger.traces", Tags: map[string]string{"state": "started", "sampled": "y"}, Value: 1}, + {Name: "jaeger.traces", Tags: map[string]string{"state": "joined", "sampled": "y"}, Value: len(tests)}, + }...) +} + +func TestSpanIntegrityAfterSerialize(t *testing.T) { + serializedString := "f6c385a2c57ed8d7:b04a90b7723bdc:76c385a2c57ed8d7:1" + + context, err := ContextFromString(serializedString) + require.NoError(t, err) + require.True(t, context.traceID.Low > (uint64(1)<<63)) + require.True(t, int64(context.traceID.Low) < 0) + + newSerializedString := context.String() + require.Equal(t, serializedString, newSerializedString) +} + +func TestDecodingError(t *testing.T) { + reporter := NewInMemoryReporter() + metricsFactory, metrics := initMetrics() + tracer, closer := NewTracer("x", NewConstSampler(true), reporter, TracerOptions.Metrics(metrics)) + defer closer.Close() + + badHeader := "x.x.x.x" + httpHeader := http.Header{} + httpHeader.Add(TraceContextHeaderName, badHeader) + tmc := opentracing.HTTPHeadersCarrier(httpHeader) + _, err := tracer.Extract(opentracing.HTTPHeaders, tmc) + assert.Error(t, err) + + testutils.AssertCounterMetrics(t, metricsFactory, testutils.ExpectedMetric{Name: "jaeger.span_context_decoding_errors", Value: 1}) +} + +func TestBaggagePropagationHTTP(t *testing.T) { + tracer, closer := NewTracer("DOOP", NewConstSampler(true), NewNullReporter()) + defer closer.Close() + + sp1 := tracer.StartSpan("s1").(*Span) + sp1.SetBaggageItem("Some_Key", "12345") + assert.Equal(t, "12345", sp1.BaggageItem("Some_Key"), "baggage: %+v", sp1.context.baggage) + assert.Empty(t, sp1.BaggageItem("some-KEY"), "baggage: %+v", sp1.context.baggage) + sp1.SetBaggageItem("Some_Key", "98:765") + assert.Equal(t, "98:765", sp1.BaggageItem("Some_Key"), "baggage: %+v", sp1.context.baggage) + assert.Empty(t, sp1.BaggageItem("some-KEY"), "baggage: %+v", sp1.context.baggage) + + h := http.Header{} + h.Add("header1", "value1") // make sure this does not get unmarshalled as baggage + err := tracer.Inject(sp1.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(h)) + require.NoError(t, err) + // check that colon : was encoded as %3A + assert.Equal(t, "98%3A765", h.Get(TraceBaggageHeaderPrefix+"Some_Key"), "headers: %+v", h) + + sp2, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(h)) + require.NoError(t, err) + assert.Equal(t, map[string]string{"some_key": "98:765"}, sp2.(SpanContext).baggage) +} + +func TestJaegerBaggageHeader(t *testing.T) { + metricsFactory, metrics := initMetrics() + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter(), + TracerOptions.Metrics(metrics), + ) + 
defer closer.Close() + + h := http.Header{} + h.Add(JaegerBaggageHeader, "key1=value1, key 2=value two") + + ctx, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(h)) + require.NoError(t, err) + + sp := tracer.StartSpan("root", opentracing.ChildOf(ctx)).(*Span) + + assert.Equal(t, "value1", sp.BaggageItem("key1")) + assert.Equal(t, "value two", sp.BaggageItem("key 2")) + + // ensure that traces.started counter is incremented, not traces.joined + testutils.AssertCounterMetrics(t, metricsFactory, + testutils.ExpectedMetric{ + Name: "jaeger.traces", Tags: map[string]string{"state": "started", "sampled": "y"}, Value: 1, + }, + ) +} + +func TestParseCommaSeparatedMap(t *testing.T) { + var testcases = []struct { + in string + out map[string]string + }{ + {"hobbit=Bilbo Baggins", map[string]string{"hobbit": "Bilbo Baggins"}}, + {"hobbit=Bilbo Baggins, dwarf= Thrain", map[string]string{"hobbit": "Bilbo Baggins", "dwarf": " Thrain"}}, + {"kevin spacey=actor", map[string]string{"kevin spacey": "actor"}}, + {"kevin%20spacey=se7en%3Aactor", map[string]string{"kevin spacey": "se7en:actor"}}, + {"key1=, key2=", map[string]string{"key1": "", "key2": ""}}, + {"malformed", map[string]string{}}, + {"malformed, string", map[string]string{}}, + {"another malformed string", map[string]string{}}, + } + + for _, testcase := range testcases { + m := (&textMapPropagator{ + headerKeys: getDefaultHeadersConfig(), + }).parseCommaSeparatedMap(testcase.in) + assert.Equal(t, testcase.out, m) + } +} + +func TestDebugCorrelationID(t *testing.T) { + metricsFactory, metrics := initMetrics() + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter(), + TracerOptions.Metrics(metrics), + ) + defer closer.Close() + + h := http.Header{} + h.Add(JaegerDebugHeader, "value1") + ctx, err := tracer.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(h)) + require.NoError(t, err) + assert.EqualValues(t, 0, ctx.(SpanContext).parentID) + assert.EqualValues(t, "value1", ctx.(SpanContext).debugID) + sp := tracer.StartSpan("root", opentracing.ChildOf(ctx)).(*Span) + assert.EqualValues(t, 0, sp.context.parentID) + assert.True(t, sp.context.traceID.IsValid()) + assert.True(t, sp.context.IsSampled()) + assert.True(t, sp.context.IsDebug()) + tagFound := false + for _, tag := range sp.tags { + if tag.key == JaegerDebugHeader { + assert.Equal(t, "value1", tag.value) + tagFound = true + } + } + assert.True(t, tagFound) + + // ensure that traces.started counter is incremented, not traces.joined + testutils.AssertCounterMetrics(t, metricsFactory, + testutils.ExpectedMetric{ + Name: "jaeger.traces", Tags: map[string]string{"state": "started", "sampled": "y"}, Value: 1, + }, + ) +} diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go index f4c9f7c39b..5646e78bb2 100644 --- a/vendor/github.com/uber/jaeger-client-go/reference.go +++ b/vendor/github.com/uber/jaeger-client-go/reference.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc.
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go index a5b2743610..3acc4be7d1 100644 --- a/vendor/github.com/uber/jaeger-client-go/reporter.go +++ b/vendor/github.com/uber/jaeger-client-go/reporter.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go index 891d1bb647..65012d7015 100644 --- a/vendor/github.com/uber/jaeger-client-go/reporter_options.go +++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_test.go b/vendor/github.com/uber/jaeger-client-go/reporter_test.go new file mode 100644 index 0000000000..b4146f192c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/reporter_test.go @@ -0,0 +1,272 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
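+ +// These tests drive the remote reporter against a fake in-memory sender and a +// mock UDP agent, covering queue flushing, the contents of reported spans, and +// the jaeger.reporter_spans result counters.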
+ +package jaeger + +import ( + "io" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/uber/jaeger-lib/metrics" + mTestutils "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/uber/jaeger-client-go/testutils" + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" +) + +type reporterSuite struct { + suite.Suite + tracer opentracing.Tracer + closer io.Closer + serviceName string + reporter *remoteReporter + collector *fakeSender + metricsFactory *metrics.LocalFactory +} + +func (s *reporterSuite) SetupTest() { + s.metricsFactory = metrics.NewLocalFactory(0) + metrics := NewMetrics(s.metricsFactory, nil) + s.serviceName = "DOOP" + s.collector = &fakeSender{} + s.reporter = NewRemoteReporter( + s.collector, ReporterOptions.Metrics(metrics), + ).(*remoteReporter) + + s.tracer, s.closer = NewTracer( + "reporter-test-service", + NewConstSampler(true), + s.reporter, + TracerOptions.Metrics(metrics)) + s.NotNil(s.tracer) +} + +func (s *reporterSuite) TearDownTest() { + s.closer.Close() + s.tracer = nil + s.reporter = nil + s.collector = nil +} + +func TestReporter(t *testing.T) { + suite.Run(t, new(reporterSuite)) +} + +func (s *reporterSuite) flushReporter() { + // Wait for reporter queue to add spans to buffer. We could've called reporter.Close(), + // but then it fails when the test suite calls close on it again (via tracer's Closer). + time.Sleep(5 * time.Millisecond) + + var wg sync.WaitGroup + wg.Add(1) + s.reporter.flushSignal <- &wg + wg.Wait() +} + +func (s *reporterSuite) TestRootSpanTags() { + s.metricsFactory.Clear() + sp := s.tracer.StartSpan("get_name") + ext.SpanKindRPCServer.Set(sp) + ext.PeerService.Set(sp, s.serviceName) + sp.Finish() + s.flushReporter() + s.Equal(1, len(s.collector.Spans())) + span := s.collector.Spans()[0] + s.Len(span.tags, 4) + s.EqualValues("server", span.tags[2].value, "span.kind should be server") + + mTestutils.AssertCounterMetrics(s.T(), s.metricsFactory, + mTestutils.ExpectedMetric{ + Name: "jaeger.reporter_spans", + Tags: map[string]string{"result": "ok"}, + Value: 1, + }, + ) +} + +func (s *reporterSuite) TestClientSpan() { + s.metricsFactory.Clear() + sp := s.tracer.StartSpan("get_name") + ext.SpanKindRPCServer.Set(sp) + ext.PeerService.Set(sp, s.serviceName) + sp2 := s.tracer.StartSpan("get_last_name", opentracing.ChildOf(sp.Context())) + ext.SpanKindRPCClient.Set(sp2) + ext.PeerService.Set(sp2, s.serviceName) + sp2.Finish() + sp.Finish() + s.flushReporter() + s.Equal(2, len(s.collector.Spans())) + span := s.collector.Spans()[0] // child span is reported first + s.EqualValues(span.context.spanID, sp2.(*Span).context.spanID) + s.Len(span.tags, 2) + s.EqualValues("client", span.tags[0].value, "span.kind should be client") + + mTestutils.AssertCounterMetrics(s.T(), s.metricsFactory, + mTestutils.ExpectedMetric{ + Name: "jaeger.reporter_spans", + Tags: map[string]string{"result": "ok"}, + Value: 2, + }, + ) +} + +func (s *reporterSuite) TestTagsAndEvents() { + sp := s.tracer.StartSpan("get_name") + sp.LogEvent("hello") + sp.LogEvent(strings.Repeat("long event ", 30)) + expected := []string{"long", "ping", "awake", "awake", "one", "two", "three", "bite me", + SamplerParamTagKey, SamplerTypeTagKey, "does not compute"} + sp.SetTag("long", strings.Repeat("x", 300)) + sp.SetTag("ping", "pong") + sp.SetTag("awake", true) + 
sp.SetTag("awake", false) + sp.SetTag("one", 1) + sp.SetTag("two", int32(2)) + sp.SetTag("three", int64(3)) + sp.SetTag("bite me", []byte{1}) + sp.SetTag("does not compute", sp) // should be converted to string + sp.Finish() + s.flushReporter() + s.Equal(1, len(s.collector.Spans())) + span := s.collector.Spans()[0] + s.Equal(2, len(span.logs), "expecting two logs") + s.Equal(len(expected), len(span.tags), + "expecting %d tags", len(expected)) + tags := []string{} + for _, tag := range span.tags { + tags = append(tags, string(tag.key)) + } + sort.Strings(expected) + sort.Strings(tags) + s.Equal(expected, tags, "expecting %d tags", len(expected)) + + s.NotNil(findDomainLog(span, "hello"), "expecting 'hello' log: %+v", span.logs) +} + +func TestUDPReporter(t *testing.T) { + agent, err := testutils.StartMockAgent() + require.NoError(t, err) + defer agent.Close() + + testRemoteReporter(t, + func(m *Metrics) (Transport, error) { + return NewUDPTransport(agent.SpanServerAddr(), 0) + }, + func() []*j.Batch { + return agent.GetJaegerBatches() + }) +} + +func testRemoteReporter( + t *testing.T, + factory func(m *Metrics) (Transport, error), + getBatches func() []*j.Batch, +) { + metricsFactory := metrics.NewLocalFactory(0) + metrics := NewMetrics(metricsFactory, nil) + + sender, err := factory(metrics) + require.NoError(t, err) + reporter := NewRemoteReporter(sender, ReporterOptions.Metrics(metrics)).(*remoteReporter) + + tracer, closer := NewTracer( + "reporter-test-service", + NewConstSampler(true), + reporter, + TracerOptions.Metrics(metrics)) + + span := tracer.StartSpan("leela") + ext.SpanKindRPCClient.Set(span) + ext.PeerService.Set(span, "downstream") + span.Finish() + closer.Close() // close the tracer, which also closes and flushes the reporter + // however, in case of UDP reporter it's fire and forget, so we need to wait a bit + time.Sleep(5 * time.Millisecond) + + batches := getBatches() + require.Equal(t, 1, len(batches)) + require.Equal(t, 1, len(batches[0].Spans)) + assert.Equal(t, "leela", batches[0].Spans[0].OperationName) + assert.Equal(t, "reporter-test-service", batches[0].Process.ServiceName) + tag := findJaegerTag("peer.service", batches[0].Spans[0].Tags) + assert.NotNil(t, tag) + assert.Equal(t, "downstream", *tag.VStr) + + mTestutils.AssertCounterMetrics(t, metricsFactory, []mTestutils.ExpectedMetric{ + {Name: "jaeger.reporter_spans", Tags: map[string]string{"result": "ok"}, Value: 1}, + {Name: "jaeger.reporter_spans", Tags: map[string]string{"result": "err"}, Value: 0}, + }...) 
+} + +func (s *reporterSuite) TestMemoryReporterReport() { + sp := s.tracer.StartSpan("leela") + ext.PeerService.Set(sp, s.serviceName) + reporter := NewInMemoryReporter() + reporter.Report(sp.(*Span)) + s.Equal(1, reporter.SpansSubmitted(), "expected number of spans submitted") + reporter.Close() +} + +type fakeSender struct { + spans []*Span + mutex sync.Mutex +} + +func (s *fakeSender) Append(span *Span) (int, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.spans = append(s.spans, span) + return 1, nil +} + +func (s *fakeSender) Flush() (int, error) { + return 0, nil +} + +func (s *fakeSender) Close() error { return nil } + +func (s *fakeSender) Spans() []*Span { + s.mutex.Lock() + defer s.mutex.Unlock() + res := make([]*Span, len(s.spans)) + copy(res, s.spans) + return res +} + +func findDomainLog(span *Span, key string) *opentracing.LogRecord { + for _, log := range span.logs { + if log.Fields[0].Value().(string) == key { + return &log + } + } + return nil +} + +func findDomainTag(span *Span, key string) *Tag { + for _, tag := range span.tags { + if tag.key == key { + return &tag + } + } + return nil +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go index 5b73b14071..51aa11b350 100644 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. // Package rpcmetrics implements an Observer that can be used to emit RPC metrics. 
package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go index 79ec1c8ef9..30555243d0 100644 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints_test.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints_test.go new file mode 100644 index 0000000000..8a5b4e5305 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints_test.go @@ -0,0 +1,43 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rpcmetrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNormalizedEndpoints(t *testing.T) { + n := newNormalizedEndpoints(1, DefaultNameNormalizer) + + assertLen := func(l int) { + n.mux.RLock() + defer n.mux.RUnlock() + assert.Len(t, n.names, l) + } + + assert.Equal(t, "ab-cd", n.normalize("ab^cd"), "one translation") + assert.Equal(t, "ab-cd", n.normalize("ab^cd"), "cache hit") + assertLen(1) + assert.Equal(t, "", n.normalize("xys"), "cache overflow") + assertLen(1) +} + +func TestNormalizedEndpointsDoubleLocking(t *testing.T) { + n := newNormalizedEndpoints(1, DefaultNameNormalizer) + assert.Equal(t, "ab-cd", n.normalize("ab^cd"), "fill out the cache") + assert.Equal(t, "", n.normalizeWithLock("xys"), "cache overflow") +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go index d01387fa40..ab8d74c291 100644 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics_test.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics_test.go new file mode 100644 index 0000000000..292ec943f3 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics_test.go @@ -0,0 +1,61 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" +) + +// E.g. tags("key", "value", "key", "value") +func tags(kv ...string) map[string]string { + m := make(map[string]string) + for i := 0; i < len(kv)-1; i += 2 { + m[kv[i]] = kv[i+1] + } + return m +} + +func endpointTags(endpoint string, kv ...string) map[string]string { + return tags(append([]string{"endpoint", endpoint}, kv...)...) +} + +func TestMetricsByEndpoint(t *testing.T) { + met := metrics.NewLocalFactory(0) + mbe := newMetricsByEndpoint(met, DefaultNameNormalizer, 2) + + m1 := mbe.get("abc1") + m2 := mbe.get("abc1") // from cache + m2a := mbe.getWithWriteLock("abc1") // from cache in double-checked lock + assert.Equal(t, m1, m2) + assert.Equal(t, m1, m2a) + + m3 := mbe.get("abc3") + m4 := mbe.get("overflow") + m5 := mbe.get("overflow2") + + for _, m := range []*Metrics{m1, m2, m2a, m3, m4, m5} { + m.RequestCountSuccess.Inc(1) + } + + testutils.AssertCounterMetrics(t, met, + testutils.ExpectedMetric{Name: "requests", Tags: endpointTags("abc1", "error", "false"), Value: 3}, + testutils.ExpectedMetric{Name: "requests", Tags: endpointTags("abc3", "error", "false"), Value: 1}, + testutils.ExpectedMetric{Name: "requests", Tags: endpointTags("other", "error", "false"), Value: 2}, + ) +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go index ac0f79b0fe..148d84b3a1 100644 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
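One detail of TestMetricsByEndpoint above that is easy to miss: the final argument of newMetricsByEndpoint (2 in the test) caps the number of distinct endpoint labels. Once "abc1" and "abc3" occupy the cache, every further endpoint, here "overflow" and "overflow2", is folded into the fixed name "other", which is why the endpoint=other counter ends up at 2. The cap keeps metric cardinality bounded even when operation names are unbounded.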
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer_test.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer_test.go new file mode 100644 index 0000000000..a93c17165b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer_test.go @@ -0,0 +1,34 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSimpleNameNormalizer(t *testing.T) { + n := &SimpleNameNormalizer{ + SafeSets: []SafeCharacterSet{ + &Range{From: 'a', To: 'z'}, + &Char{'-'}, + }, + Replacement: '-', + } + assert.Equal(t, "ab-cd", n.Normalize("ab-cd"), "all valid") + assert.Equal(t, "ab-cd", n.Normalize("ab.cd"), "single mismatch") + assert.Equal(t, "a--cd", n.Normalize("aB-cd"), "range letter mismatch") +} diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go index c10624d78e..eca5ff6f3b 100644 --- a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
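A note on the last assertion in TestSimpleNameNormalizer above: SafeSets lists only the lowercase range a-z and the character '-', so the uppercase 'B' in "aB-cd" falls outside every safe set and is replaced with '-', giving "a--cd". Normalization is decided per character, not per word.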
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package rpcmetrics diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer_test.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer_test.go new file mode 100644 index 0000000000..c2c31ae078 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer_test.go @@ -0,0 +1,177 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpcmetrics + +import ( + "fmt" + "testing" + "time" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/assert" + "github.com/uber/jaeger-lib/metrics" + u "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/opentracing/opentracing-go/ext" + jaeger "github.com/uber/jaeger-client-go" +) + +func ExampleObserver() { + metricsFactory := metrics.NewLocalFactory(0) + metricsObserver := NewObserver( + metricsFactory, + DefaultNameNormalizer, + ) + tracer, closer := jaeger.NewTracer( + "serviceName", + jaeger.NewConstSampler(true), + jaeger.NewInMemoryReporter(), + jaeger.TracerOptions.Observer(metricsObserver), + ) + defer closer.Close() + + span := tracer.StartSpan("test", ext.SpanKindRPCServer) + span.Finish() + + c, _ := metricsFactory.Snapshot() + fmt.Printf("requests (success): %d\n", c["requests|endpoint=test|error=false"]) + fmt.Printf("requests (failure): %d\n", c["requests|endpoint=test|error=true"]) + // Output: + // requests (success): 1 + // requests (failure): 0 +} + +type testTracer struct { + metrics *metrics.LocalFactory + tracer opentracing.Tracer +} + +func withTestTracer(runTest func(tt *testTracer)) { + sampler := jaeger.NewConstSampler(true) + reporter := jaeger.NewInMemoryReporter() + metrics := metrics.NewLocalFactory(time.Minute) + observer := NewObserver(metrics, DefaultNameNormalizer) + tracer, closer := jaeger.NewTracer( + "test", + sampler, + reporter, + jaeger.TracerOptions.Observer(observer)) + defer closer.Close() + runTest(&testTracer{ + metrics: metrics, + tracer: tracer, + }) +} + +func TestObserver(t *testing.T) { + withTestTracer(func(testTracer *testTracer) { + ts := time.Now() + finishOptions := opentracing.FinishOptions{ + FinishTime: ts.Add(50 * time.Millisecond), + } + + testCases := []struct { + name string + tag opentracing.Tag + opNameOverride string + err bool + }{ + {name: "local-span", tag: opentracing.Tag{Key: "x", Value: "y"}}, + {name: "get-user", tag: ext.SpanKindRPCServer}, + {name: "get-user", tag: ext.SpanKindRPCServer, opNameOverride: "get-user-override"}, + {name: "get-user", tag: ext.SpanKindRPCServer, err: true}, + {name: "get-user-client", tag: ext.SpanKindRPCClient}, + } + + for _, testCase := range testCases { + span := 
testTracer.tracer.StartSpan( + testCase.name, + testCase.tag, + opentracing.StartTime(ts), + ) + if testCase.opNameOverride != "" { + span.SetOperationName(testCase.opNameOverride) + } + if testCase.err { + ext.Error.Set(span, true) + } + span.FinishWithOptions(finishOptions) + } + + u.AssertCounterMetrics(t, + testTracer.metrics, + u.ExpectedMetric{Name: "requests", Tags: endpointTags("local-span", "error", "false"), Value: 0}, + u.ExpectedMetric{Name: "requests", Tags: endpointTags("get-user", "error", "false"), Value: 1}, + u.ExpectedMetric{Name: "requests", Tags: endpointTags("get-user", "error", "true"), Value: 1}, + u.ExpectedMetric{Name: "requests", Tags: endpointTags("get-user-override", "error", "false"), Value: 1}, + u.ExpectedMetric{Name: "requests", Tags: endpointTags("get-user-client", "error", "false"), Value: 0}, + ) + // TODO something wrong with string generation, .P99 should not be appended to the tag + // as a result we cannot use u.AssertGaugeMetrics + _, g := testTracer.metrics.Snapshot() + assert.EqualValues(t, 51, g["request_latency|endpoint=get-user|error=false.P99"]) + assert.EqualValues(t, 51, g["request_latency|endpoint=get-user|error=true.P99"]) + }) +} + +func TestTags(t *testing.T) { + type tagTestCase struct { + key string + value interface{} + metrics []u.ExpectedMetric + } + + testCases := []tagTestCase{ + {key: "something", value: 42, metrics: []u.ExpectedMetric{ + {Name: "requests", Value: 1, Tags: tags("error", "false")}, + }}, + {key: "error", value: true, metrics: []u.ExpectedMetric{ + {Name: "requests", Value: 1, Tags: tags("error", "true")}, + }}, + {key: "error", value: "true", metrics: []u.ExpectedMetric{ + {Name: "requests", Value: 1, Tags: tags("error", "true")}, + }}, + } + + for i := 2; i <= 5; i++ { + values := []interface{}{ + i * 100, + uint16(i * 100), + fmt.Sprintf("%d00", i), + } + for _, v := range values { + testCases = append(testCases, tagTestCase{ + key: "http.status_code", value: v, metrics: []u.ExpectedMetric{ + {Name: "http_requests", Value: 1, Tags: tags("status_code", fmt.Sprintf("%dxx", i))}, + }, + }) + } + } + + for _, tc := range testCases { + testCase := tc // capture loop var + for i := range testCase.metrics { + testCase.metrics[i].Tags["endpoint"] = "span" + } + t.Run(fmt.Sprintf("%s-%v", testCase.key, testCase.value), func(t *testing.T) { + withTestTracer(func(testTracer *testTracer) { + span := testTracer.tracer.StartSpan("span", ext.SpanKindRPCServer) + span.SetTag(testCase.key, testCase.value) + span.Finish() + u.AssertCounterMetrics(t, testTracer.metrics, testCase.metrics...) + }) + }) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go index f7eeee7e94..f2e8c994e2 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger @@ -338,6 +332,7 @@ func (s *adaptiveSampler) Close() { for _, sampler := range s.samplers { sampler.Close() } + s.defaultSampler.Close() } func (s *adaptiveSampler) Equal(other Sampler) bool { diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_options.go index 8d9c5bf9ee..75d28a5611 100644 --- a/vendor/github.com/uber/jaeger-client-go/sampler_options.go +++ b/vendor/github.com/uber/jaeger-client-go/sampler_options.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_test.go b/vendor/github.com/uber/jaeger-client-go/sampler_test.go new file mode 100644 index 0000000000..ae7ab5434c --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/sampler_test.go @@ -0,0 +1,691 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "errors" + "fmt" + "runtime" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/uber/jaeger-lib/metrics" + mTestutils "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/testutils" + "github.com/uber/jaeger-client-go/thrift-gen/sampling" + "github.com/uber/jaeger-client-go/utils" +) + +const ( + testOperationName = "op" + testFirstTimeOperationName = "firstTimeOp" + + testDefaultSamplingProbability = 0.5 + testMaxID = uint64(1) << 62 + testDefaultMaxOperations = 10 +) + +var ( + testProbabilisticExpectedTags = []Tag{ + {"sampler.type", "probabilistic"}, + {"sampler.param", 0.5}, + } + testLowerBoundExpectedTags = []Tag{ + {"sampler.type", "lowerbound"}, + {"sampler.param", 0.5}, + } +) + +func TestSamplerTags(t *testing.T) { + prob, err := NewProbabilisticSampler(0.1) + require.NoError(t, err) + rate := NewRateLimitingSampler(0.1) + remote := &RemotelyControlledSampler{} + remote.sampler = NewConstSampler(true) + tests := []struct { + sampler Sampler + typeTag string + paramTag interface{} + }{ + {NewConstSampler(true), "const", true}, + {NewConstSampler(false), "const", false}, + {prob, "probabilistic", 0.1}, + {rate, "ratelimiting", 0.1}, + {remote, "const", true}, + } + for _, test := range tests { + _, tags := test.sampler.IsSampled(TraceID{}, testOperationName) + count := 0 + for _, tag := range tags { + if tag.key == SamplerTypeTagKey { + assert.Equal(t, test.typeTag, tag.value) + count++ + } + if tag.key == SamplerParamTagKey { + assert.Equal(t, test.paramTag, tag.value) + count++ + } + } + assert.Equal(t, 2, count) + } +} + +func TestApplySamplerOptions(t *testing.T) { + options := applySamplerOptions() + sampler, ok := options.sampler.(*ProbabilisticSampler) + assert.True(t, ok) + assert.Equal(t, 0.001, sampler.samplingRate) + + assert.NotNil(t, options.logger) + assert.NotZero(t, options.maxOperations) + assert.NotEmpty(t, options.samplingServerURL) + assert.NotNil(t, options.metrics) + assert.NotZero(t, options.samplingRefreshInterval) +} + +func TestProbabilisticSamplerErrors(t *testing.T) { + _, err := NewProbabilisticSampler(-0.1) + assert.Error(t, err) + _, err = NewProbabilisticSampler(1.1) + 
assert.Error(t, err) +} + +func TestProbabilisticSampler(t *testing.T) { + sampler, _ := NewProbabilisticSampler(0.5) + sampled, tags := sampler.IsSampled(TraceID{Low: testMaxID + 10}, testOperationName) + assert.False(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) + sampled, tags = sampler.IsSampled(TraceID{Low: testMaxID - 20}, testOperationName) + assert.True(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) + sampler2, _ := NewProbabilisticSampler(0.5) + assert.True(t, sampler.Equal(sampler2)) + assert.False(t, sampler.Equal(NewConstSampler(true))) +} + +func TestProbabilisticSamplerPerformance(t *testing.T) { + t.Skip("Skipped performance test") + sampler, _ := NewProbabilisticSampler(0.01) + rand := utils.NewRand(8736823764) + var count uint64 + for i := 0; i < 100000000; i++ { + id := TraceID{Low: uint64(rand.Int63())} + if sampled, _ := sampler.IsSampled(id, testOperationName); sampled { + count++ + } + } + println("Sampled:", count, "rate=", float64(count)/float64(100000000)) + // Sampled: 999829 rate= 0.009998290 +} + +func TestRateLimitingSampler(t *testing.T) { + sampler := NewRateLimitingSampler(2) + sampler2 := NewRateLimitingSampler(2) + sampler3 := NewRateLimitingSampler(3) + assert.True(t, sampler.Equal(sampler2)) + assert.False(t, sampler.Equal(sampler3)) + assert.False(t, sampler.Equal(NewConstSampler(false))) + + sampler = NewRateLimitingSampler(2) + sampled, _ := sampler.IsSampled(TraceID{}, testOperationName) + assert.True(t, sampled) + sampled, _ = sampler.IsSampled(TraceID{}, testOperationName) + assert.True(t, sampled) + sampled, _ = sampler.IsSampled(TraceID{}, testOperationName) + assert.False(t, sampled) + + sampler = NewRateLimitingSampler(0.1) + sampled, _ = sampler.IsSampled(TraceID{}, testOperationName) + assert.True(t, sampled) + sampled, _ = sampler.IsSampled(TraceID{}, testOperationName) + assert.False(t, sampled) +} + +func TestGuaranteedThroughputProbabilisticSamplerUpdate(t *testing.T) { + samplingRate := 0.5 + lowerBound := 2.0 + sampler, err := NewGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate) + assert.NoError(t, err) + + assert.Equal(t, lowerBound, sampler.lowerBound) + assert.Equal(t, samplingRate, sampler.samplingRate) + + newSamplingRate := 0.6 + newLowerBound := 1.0 + sampler.update(newLowerBound, newSamplingRate) + assert.Equal(t, newLowerBound, sampler.lowerBound) + assert.Equal(t, newSamplingRate, sampler.samplingRate) + + newSamplingRate = 1.1 + sampler.update(newLowerBound, newSamplingRate) + assert.Equal(t, 1.0, sampler.samplingRate) +} + +func TestAdaptiveSampler(t *testing.T) { + samplingRates := []*sampling.OperationSamplingStrategy{ + { + Operation: testOperationName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: testDefaultSamplingProbability}, + }, + } + strategies := &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: testDefaultSamplingProbability, + DefaultLowerBoundTracesPerSecond: 1.0, + PerOperationStrategies: samplingRates, + } + + sampler, err := NewAdaptiveSampler(strategies, testDefaultMaxOperations) + require.NoError(t, err) + defer sampler.Close() + + sampled, tags := sampler.IsSampled(TraceID{Low: testMaxID + 10}, testOperationName) + assert.True(t, sampled) + assert.Equal(t, testLowerBoundExpectedTags, tags) + + sampled, tags = sampler.IsSampled(TraceID{Low: testMaxID - 20}, testOperationName) + assert.True(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) + + sampled, tags = 
sampler.IsSampled(TraceID{Low: testMaxID + 10}, testOperationName) + assert.False(t, sampled) + + // This operation is seen for the first time by the sampler + sampled, tags = sampler.IsSampled(TraceID{Low: testMaxID}, testFirstTimeOperationName) + assert.True(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) +} + +func TestAdaptiveSamplerErrors(t *testing.T) { + strategies := &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: testDefaultSamplingProbability, + DefaultLowerBoundTracesPerSecond: 2.0, + PerOperationStrategies: []*sampling.OperationSamplingStrategy{ + { + Operation: testOperationName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: -0.1}, + }, + }, + } + + sampler, err := NewAdaptiveSampler(strategies, testDefaultMaxOperations) + assert.NoError(t, err) + assert.Equal(t, 0.0, sampler.(*adaptiveSampler).samplers[testOperationName].samplingRate) + + strategies.PerOperationStrategies[0].ProbabilisticSampling.SamplingRate = 1.1 + sampler, err = NewAdaptiveSampler(strategies, testDefaultMaxOperations) + assert.NoError(t, err) + assert.Equal(t, 1.0, sampler.(*adaptiveSampler).samplers[testOperationName].samplingRate) +} + +func TestAdaptiveSamplerUpdate(t *testing.T) { + samplingRate := 0.1 + lowerBound := 2.0 + samplingRates := []*sampling.OperationSamplingStrategy{ + { + Operation: testOperationName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: samplingRate}, + }, + } + strategies := &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: testDefaultSamplingProbability, + DefaultLowerBoundTracesPerSecond: lowerBound, + PerOperationStrategies: samplingRates, + } + + s, err := NewAdaptiveSampler(strategies, testDefaultMaxOperations) + assert.NoError(t, err) + + sampler, ok := s.(*adaptiveSampler) + assert.True(t, ok) + assert.Equal(t, lowerBound, sampler.lowerBound) + assert.Equal(t, testDefaultSamplingProbability, sampler.defaultSampler.SamplingRate()) + assert.Len(t, sampler.samplers, 1) + + // Update the sampler with new sampling rates + newSamplingRate := 0.2 + newLowerBound := 3.0 + newDefaultSamplingProbability := 0.1 + newSamplingRates := []*sampling.OperationSamplingStrategy{ + { + Operation: testOperationName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: newSamplingRate}, + }, + { + Operation: testFirstTimeOperationName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: newSamplingRate}, + }, + } + strategies = &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: newDefaultSamplingProbability, + DefaultLowerBoundTracesPerSecond: newLowerBound, + PerOperationStrategies: newSamplingRates, + } + + sampler.update(strategies) + assert.Equal(t, newLowerBound, sampler.lowerBound) + assert.Equal(t, newDefaultSamplingProbability, sampler.defaultSampler.SamplingRate()) + assert.Len(t, sampler.samplers, 2) +} + +func initAgent(t *testing.T) (*testutils.MockAgent, *RemotelyControlledSampler, *metrics.LocalFactory) { + agent, err := testutils.StartMockAgent() + require.NoError(t, err) + + metricsFactory := metrics.NewLocalFactory(0) + metrics := NewMetrics(metricsFactory, nil) + + initialSampler, _ := NewProbabilisticSampler(0.001) + sampler := NewRemotelyControlledSampler( + "client app", + SamplerOptions.Metrics(metrics), + SamplerOptions.SamplingServerURL("http://"+agent.SamplingServerAddr()), + SamplerOptions.MaxOperations(testDefaultMaxOperations), + 
SamplerOptions.InitialSampler(initialSampler), + SamplerOptions.Logger(log.NullLogger), + SamplerOptions.SamplingRefreshInterval(time.Minute), + ) + sampler.Close() // stop timer-based updates, we want to call them manually + + return agent, sampler, metricsFactory +} + +func TestRemotelyControlledSampler(t *testing.T) { + agent, remoteSampler, metricsFactory := initAgent(t) + defer agent.Close() + + initSampler, ok := remoteSampler.sampler.(*ProbabilisticSampler) + assert.True(t, ok) + + agent.AddSamplingStrategy("client app", + getSamplingStrategyResponse(sampling.SamplingStrategyType_PROBABILISTIC, testDefaultSamplingProbability)) + remoteSampler.updateSampler() + mTestutils.AssertCounterMetrics(t, metricsFactory, []mTestutils.ExpectedMetric{ + {Name: "jaeger.sampler_queries", Tags: map[string]string{"result": "ok"}, Value: 1}, + {Name: "jaeger.sampler_updates", Tags: map[string]string{"result": "ok"}, Value: 1}, + }...) + _, ok = remoteSampler.sampler.(*ProbabilisticSampler) + assert.True(t, ok) + assert.NotEqual(t, initSampler, remoteSampler.sampler, "Sampler should have been updated") + + sampled, tags := remoteSampler.IsSampled(TraceID{Low: testMaxID + 10}, testOperationName) + assert.False(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) + sampled, tags = remoteSampler.IsSampled(TraceID{Low: testMaxID - 10}, testOperationName) + assert.True(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) + + remoteSampler.sampler = initSampler + c := make(chan time.Time) + remoteSampler.Lock() + remoteSampler.timer = &time.Ticker{C: c} + remoteSampler.Unlock() + go remoteSampler.pollController() + + c <- time.Now() // force update based on timer + time.Sleep(10 * time.Millisecond) + remoteSampler.Close() + + _, ok = remoteSampler.sampler.(*ProbabilisticSampler) + assert.True(t, ok) + assert.NotEqual(t, initSampler, remoteSampler.sampler, "Sampler should have been updated from timer") + + assert.True(t, remoteSampler.Equal(remoteSampler)) +} + +func generateTags(key string, value float64) []Tag { + return []Tag{ + {"sampler.type", key}, + {"sampler.param", value}, + } +} + +func TestRemotelyControlledSampler_updateSampler(t *testing.T) { + tests := []struct { + probabilities map[string]float64 + defaultProbability float64 + expectedDefaultProbability float64 + expectedTags []Tag + }{ + { + probabilities: map[string]float64{testOperationName: 1.1}, + defaultProbability: testDefaultSamplingProbability, + expectedDefaultProbability: testDefaultSamplingProbability, + expectedTags: generateTags("probabilistic", 1.0), + }, + { + probabilities: map[string]float64{testOperationName: testDefaultSamplingProbability}, + defaultProbability: testDefaultSamplingProbability, + expectedDefaultProbability: testDefaultSamplingProbability, + expectedTags: testProbabilisticExpectedTags, + }, + { + probabilities: map[string]float64{ + testOperationName: testDefaultSamplingProbability, + testFirstTimeOperationName: testDefaultSamplingProbability, + }, + defaultProbability: testDefaultSamplingProbability, + expectedDefaultProbability: testDefaultSamplingProbability, + expectedTags: testProbabilisticExpectedTags, + }, + { + probabilities: map[string]float64{"new op": 1.1}, + defaultProbability: testDefaultSamplingProbability, + expectedDefaultProbability: testDefaultSamplingProbability, + expectedTags: testProbabilisticExpectedTags, + }, + { + probabilities: map[string]float64{"new op": 1.1}, + defaultProbability: 1.1, + expectedDefaultProbability: 1.0, + expectedTags: 
generateTags("probabilistic", 1.0), + }, + } + + for i, test := range tests { + t.Run(fmt.Sprintf("test_%d", i), func(t *testing.T) { + agent, sampler, metricsFactory := initAgent(t) + defer agent.Close() + + initSampler, ok := sampler.sampler.(*ProbabilisticSampler) + assert.True(t, ok) + + res := &sampling.SamplingStrategyResponse{ + StrategyType: sampling.SamplingStrategyType_PROBABILISTIC, + OperationSampling: &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: test.defaultProbability, + DefaultLowerBoundTracesPerSecond: 0.001, + }, + } + for opName, prob := range test.probabilities { + res.OperationSampling.PerOperationStrategies = append(res.OperationSampling.PerOperationStrategies, + &sampling.OperationSamplingStrategy{ + Operation: opName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{ + SamplingRate: prob, + }, + }, + ) + } + + agent.AddSamplingStrategy("client app", res) + sampler.updateSampler() + + mTestutils.AssertCounterMetrics(t, metricsFactory, + mTestutils.ExpectedMetric{ + Name: "jaeger.sampler_updates", Tags: map[string]string{"result": "ok"}, Value: 1, + }, + ) + + s, ok := sampler.sampler.(*adaptiveSampler) + assert.True(t, ok) + assert.NotEqual(t, initSampler, sampler.sampler, "Sampler should have been updated") + assert.Equal(t, test.expectedDefaultProbability, s.defaultSampler.SamplingRate()) + + // First call is always sampled + sampled, tags := sampler.IsSampled(TraceID{Low: testMaxID + 10}, testOperationName) + assert.True(t, sampled) + + sampled, tags = sampler.IsSampled(TraceID{Low: testMaxID - 10}, testOperationName) + assert.True(t, sampled) + assert.Equal(t, test.expectedTags, tags) + }) + } +} + +func TestMaxOperations(t *testing.T) { + samplingRates := []*sampling.OperationSamplingStrategy{ + { + Operation: testOperationName, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: 0.1}, + }, + } + strategies := &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: testDefaultSamplingProbability, + DefaultLowerBoundTracesPerSecond: 2.0, + PerOperationStrategies: samplingRates, + } + + sampler, err := NewAdaptiveSampler(strategies, 1) + assert.NoError(t, err) + + sampled, tags := sampler.IsSampled(TraceID{Low: testMaxID - 10}, testFirstTimeOperationName) + assert.True(t, sampled) + assert.Equal(t, testProbabilisticExpectedTags, tags) +} + +func TestSamplerQueryError(t *testing.T) { + agent, sampler, metricsFactory := initAgent(t) + defer agent.Close() + + // override the actual handler + sampler.manager = &fakeSamplingManager{} + + initSampler, ok := sampler.sampler.(*ProbabilisticSampler) + assert.True(t, ok) + + sampler.Close() // stop timer-based updates, we want to call them manually + + sampler.updateSampler() + assert.Equal(t, initSampler, sampler.sampler, "Sampler should not have been updated due to query error") + + mTestutils.AssertCounterMetrics(t, metricsFactory, + mTestutils.ExpectedMetric{Name: "jaeger.sampler_queries", Tags: map[string]string{"result": "err"}, Value: 1}, + ) +} + +type fakeSamplingManager struct{} + +func (c *fakeSamplingManager) GetSamplingStrategy(serviceName string) (*sampling.SamplingStrategyResponse, error) { + return nil, errors.New("query error") +} + +func TestRemotelyControlledSampler_updateSamplerFromAdaptiveSampler(t *testing.T) { + agent, remoteSampler, metricsFactory := initAgent(t) + defer agent.Close() + remoteSampler.Close() // stop timer-based updates, we want to call them manually + + strategies := 
&sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: testDefaultSamplingProbability, + DefaultLowerBoundTracesPerSecond: 1.0, + } + + adaptiveSampler, err := NewAdaptiveSampler(strategies, testDefaultMaxOperations) + require.NoError(t, err) + + // Overwrite the sampler with an adaptive sampler + remoteSampler.sampler = adaptiveSampler + + agent.AddSamplingStrategy("client app", + getSamplingStrategyResponse(sampling.SamplingStrategyType_PROBABILISTIC, 0.5)) + remoteSampler.updateSampler() + + // Sampler should have been updated to probabilistic + _, ok := remoteSampler.sampler.(*ProbabilisticSampler) + require.True(t, ok) + + // Overwrite the sampler with an adaptive sampler + remoteSampler.sampler = adaptiveSampler + + agent.AddSamplingStrategy("client app", + getSamplingStrategyResponse(sampling.SamplingStrategyType_RATE_LIMITING, 1)) + remoteSampler.updateSampler() + + // Sampler should have been updated to ratelimiting + _, ok = remoteSampler.sampler.(*rateLimitingSampler) + require.True(t, ok) + + // Overwrite the sampler with an adaptive sampler + remoteSampler.sampler = adaptiveSampler + + // Update existing adaptive sampler + agent.AddSamplingStrategy("client app", &sampling.SamplingStrategyResponse{OperationSampling: strategies}) + remoteSampler.updateSampler() + + mTestutils.AssertCounterMetrics(t, metricsFactory, + mTestutils.ExpectedMetric{Name: "jaeger.sampler_queries", Tags: map[string]string{"result": "ok"}, Value: 3}, + mTestutils.ExpectedMetric{Name: "jaeger.sampler_updates", Tags: map[string]string{"result": "ok"}, Value: 3}, + ) +} + +func TestRemotelyControlledSampler_updateRateLimitingOrProbabilisticSampler(t *testing.T) { + probabilisticSampler, err := NewProbabilisticSampler(0.002) + require.NoError(t, err) + otherProbabilisticSampler, err := NewProbabilisticSampler(0.003) + require.NoError(t, err) + maxProbabilisticSampler, err := NewProbabilisticSampler(1.0) + require.NoError(t, err) + + rateLimitingSampler := NewRateLimitingSampler(2) + otherRateLimitingSampler := NewRateLimitingSampler(3) + + testCases := []struct { + res *sampling.SamplingStrategyResponse + initSampler Sampler + expectedSampler Sampler + shouldErr bool + referenceEquivalence bool + caption string + }{ + { + res: getSamplingStrategyResponse(sampling.SamplingStrategyType_PROBABILISTIC, 1.5), + initSampler: probabilisticSampler, + expectedSampler: maxProbabilisticSampler, + shouldErr: false, + referenceEquivalence: false, + caption: "invalid probabilistic strategy", + }, + { + res: getSamplingStrategyResponse(sampling.SamplingStrategyType_PROBABILISTIC, 0.002), + initSampler: probabilisticSampler, + expectedSampler: probabilisticSampler, + shouldErr: false, + referenceEquivalence: true, + caption: "unchanged probabilistic strategy", + }, + { + res: getSamplingStrategyResponse(sampling.SamplingStrategyType_PROBABILISTIC, 0.003), + initSampler: probabilisticSampler, + expectedSampler: otherProbabilisticSampler, + shouldErr: false, + referenceEquivalence: false, + caption: "valid probabilistic strategy", + }, + { + res: getSamplingStrategyResponse(sampling.SamplingStrategyType_RATE_LIMITING, 2), + initSampler: rateLimitingSampler, + expectedSampler: rateLimitingSampler, + shouldErr: false, + referenceEquivalence: true, + caption: "unchanged rate limiting strategy", + }, + { + res: getSamplingStrategyResponse(sampling.SamplingStrategyType_RATE_LIMITING, 3), + initSampler: rateLimitingSampler, + expectedSampler: otherRateLimitingSampler, + shouldErr: false, + referenceEquivalence: 
false, + caption: "valid rate limiting strategy", + }, + { + res: &sampling.SamplingStrategyResponse{}, + initSampler: rateLimitingSampler, + expectedSampler: rateLimitingSampler, + shouldErr: true, + referenceEquivalence: true, + caption: "invalid strategy", + }, + } + + for _, tc := range testCases { + testCase := tc // capture loop var + t.Run(testCase.caption, func(t *testing.T) { + remoteSampler := &RemotelyControlledSampler{samplerOptions: samplerOptions{sampler: testCase.initSampler}} + err := remoteSampler.updateRateLimitingOrProbabilisticSampler(testCase.res) + if testCase.shouldErr { + require.Error(t, err) + } + if testCase.referenceEquivalence { + assert.Equal(t, testCase.expectedSampler, remoteSampler.sampler) + } else { + assert.True(t, testCase.expectedSampler.Equal(remoteSampler.sampler)) + } + }) + } +} + +func getSamplingStrategyResponse(strategyType sampling.SamplingStrategyType, value float64) *sampling.SamplingStrategyResponse { + if strategyType == sampling.SamplingStrategyType_PROBABILISTIC { + return &sampling.SamplingStrategyResponse{ + StrategyType: sampling.SamplingStrategyType_PROBABILISTIC, + ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{ + SamplingRate: value, + }, + } + } + if strategyType == sampling.SamplingStrategyType_RATE_LIMITING { + return &sampling.SamplingStrategyResponse{ + StrategyType: sampling.SamplingStrategyType_RATE_LIMITING, + RateLimitingSampling: &sampling.RateLimitingSamplingStrategy{ + MaxTracesPerSecond: int16(value), + }, + } + } + return nil +} + +func TestAdaptiveSampler_lockRaceCondition(t *testing.T) { + agent, remoteSampler, _ := initAgent(t) + defer agent.Close() + remoteSampler.Close() // stop timer-based updates, we want to call them manually + + numOperations := 1000 + adaptiveSampler, err := NewAdaptiveSampler( + &sampling.PerOperationSamplingStrategies{ + DefaultSamplingProbability: 1, + }, + 2000, + ) + require.NoError(t, err) + + // Overwrite the sampler with an adaptive sampler + remoteSampler.sampler = adaptiveSampler + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(2) + + // Start 2 go routines that will simulate simultaneous calls to IsSampled + go func() { + defer wg.Done() + isSampled(t, remoteSampler, numOperations, "a") + }() + go func() { + defer wg.Done() + isSampled(t, remoteSampler, numOperations, "b") + }() +} + +func isSampled(t *testing.T, remoteSampler *RemotelyControlledSampler, numOperations int, operationNamePrefix string) { + for i := 0; i < numOperations; i++ { + runtime.Gosched() + sampled, _ := remoteSampler.IsSampled(TraceID{}, fmt.Sprintf("%s%d", operationNamePrefix, i)) + assert.True(t, sampled) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go index 96d36c6ac6..132fb72175 100644 --- a/vendor/github.com/uber/jaeger-client-go/span.go +++ b/vendor/github.com/uber/jaeger-client-go/span.go @@ -1,27 +1,20 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger import ( - "strings" "sync" "time" @@ -167,7 +160,6 @@ func (s *Span) appendLog(lr opentracing.LogRecord) { // SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext func (s *Span) SetBaggageItem(key, value string) opentracing.Span { - key = normalizeBaggageKey(key) s.Lock() defer s.Unlock() s.tracer.setBaggage(s, key, value) @@ -176,7 +168,6 @@ func (s *Span) SetBaggageItem(key, value string) opentracing.Span { // BaggageItem implements BaggageItem() of opentracing.SpanContext func (s *Span) BaggageItem(key string) string { - key = normalizeBaggageKey(key) s.RLock() defer s.RUnlock() return s.context.baggage[key] @@ -232,6 +223,10 @@ func (s *Span) OperationName() string { return s.operationName } +func (s *Span) serviceName() string { + return s.tracer.serviceName +} + func setSamplingPriority(s *Span, value interface{}) bool { s.Lock() defer s.Unlock() @@ -245,10 +240,3 @@ func setSamplingPriority(s *Span, value interface{}) bool { } return false } - -// Converts end-user baggage key into internal representation. -// Used for both read and write access to baggage items. -func normalizeBaggageKey(key string) string { - // TODO(yurishkuro) normalizeBaggageKey: cache the results in some bounded LRU cache - return strings.Replace(strings.ToLower(key), "_", "-", -1) -} diff --git a/vendor/github.com/uber/jaeger-client-go/span_test.go b/vendor/github.com/uber/jaeger-client-go/span_test.go new file mode 100644 index 0000000000..471ffac033 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/span_test.go @@ -0,0 +1,90 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
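The span.go hunk above removes normalizeBaggageKey, which is a behavior change rather than mere cleanup: baggage keys are now stored and read back verbatim instead of being lower-cased with underscores mapped to dashes. A minimal sketch using this package's constructors (the key and value are arbitrary):

	tracer, closer := NewTracer("svc", NewConstSampler(true), NewNullReporter())
	defer closer.Close()

	sp := tracer.StartSpan("s1")
	sp.SetBaggageItem("Some_Key", "12345")
	v := sp.BaggageItem("Some_Key") // "12345"; previously the key was folded to "some-key"
	_ = v

The new TestBaggageIterator below relies on exactly this: it sets "Some_Key" and expects the key back unchanged.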
+ +package jaeger + +import ( + "testing" + + "github.com/opentracing/opentracing-go" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBaggageIterator(t *testing.T) { + service := "DOOP" + tracer, closer := NewTracer(service, NewConstSampler(true), NewNullReporter()) + defer closer.Close() + + sp1 := tracer.StartSpan("s1").(*Span) + sp1.SetBaggageItem("Some_Key", "12345") + sp1.SetBaggageItem("Some-other-key", "42") + expectedBaggage := map[string]string{"Some_Key": "12345", "Some-other-key": "42"} + assertBaggage(t, sp1, expectedBaggage) + assertBaggageRecords(t, sp1, expectedBaggage) + + b := extractBaggage(sp1, false) // break out early + assert.Equal(t, 1, len(b), "only one baggage item should be extracted") + + sp2 := tracer.StartSpan("s2", opentracing.ChildOf(sp1.Context())).(*Span) + assertBaggage(t, sp2, expectedBaggage) // child inherits the same baggage + require.Len(t, sp2.logs, 0) // child doesn't inherit the baggage logs +} + +func assertBaggageRecords(t *testing.T, sp *Span, expected map[string]string) { + require.Len(t, sp.logs, len(expected)) + for _, logRecord := range sp.logs { + require.Len(t, logRecord.Fields, 3) + require.Equal(t, "event:baggage", logRecord.Fields[0].String()) + key := logRecord.Fields[1].Value().(string) + value := logRecord.Fields[2].Value().(string) + + require.Contains(t, expected, key) + assert.Equal(t, expected[key], value) + } +} + +func assertBaggage(t *testing.T, sp opentracing.Span, expected map[string]string) { + b := extractBaggage(sp, true) + assert.Equal(t, expected, b) +} + +func extractBaggage(sp opentracing.Span, allItems bool) map[string]string { + b := make(map[string]string) + sp.Context().ForeachBaggageItem(func(k, v string) bool { + b[k] = v + return allItems + }) + return b +} + +func TestSpanProperties(t *testing.T) { + tracer, closer := NewTracer("DOOP", NewConstSampler(true), NewNullReporter()) + defer closer.Close() + + sp1 := tracer.StartSpan("s1").(*Span) + assert.Equal(t, tracer, sp1.Tracer()) + assert.NotNil(t, sp1.Context()) +} + +func TestSpanOperationName(t *testing.T) { + tracer, closer := NewTracer("DOOP", NewConstSampler(true), NewNullReporter()) + defer closer.Close() + + sp1 := tracer.StartSpan("s1").(*Span) + sp1.SetOperationName("s2") + sp1.Finish() + + assert.Equal(t, "s2", sp1.OperationName()) +} diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/.nocover b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go index 2fdd5ee439..49fb099e36 100644 --- a/vendor/github.com/uber/jaeger-client-go/tracer.go +++ b/vendor/github.com/uber/jaeger-client-go/tracer.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. 
+// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger @@ -50,9 +44,10 @@ type Tracer struct { randomNumber func() uint64 options struct { - poolSpans bool - gen128Bit bool // whether to generate 128bit trace IDs - zipkinSharedRPCSpan bool + poolSpans bool + gen128Bit bool // whether to generate 128bit trace IDs + zipkinSharedRPCSpan bool + highTraceIDGenerator func() uint64 // custom high trace ID generator // more options to come } // pool for Span objects @@ -140,6 +135,15 @@ func NewTracer( t.logger.Error("Unable to determine this host's IP address: " + err.Error()) } + if t.options.gen128Bit { + if t.options.highTraceIDGenerator == nil { + t.options.highTraceIDGenerator = t.randomNumber + } + } else if t.options.highTraceIDGenerator != nil { + t.logger.Error("Overriding high trace ID generator but not generating " + + "128 bit trace IDs, consider enabling the \"Gen128Bit\" option") + } + return t, t } @@ -211,7 +215,7 @@ func (t *Tracer) startSpanWithOptions( newTrace = true ctx.traceID.Low = t.randomID() if t.options.gen128Bit { - ctx.traceID.High = t.randomID() + ctx.traceID.High = t.options.highTraceIDGenerator() } ctx.spanID = SpanID(ctx.traceID.Low) ctx.parentID = 0 @@ -344,9 +348,8 @@ func (t *Tracer) startSpanInternal( } } // emit metrics - t.metrics.SpansStarted.Inc(1) if sp.context.IsSampled() { - t.metrics.SpansSampled.Inc(1) + t.metrics.SpansStartedSampled.Inc(1) if newTrace { // We cannot simply check for parentID==0 because in Zipkin model the // server-side RPC span has the exact same trace/span/parent IDs as the @@ -357,7 +360,7 @@ func (t *Tracer) startSpanInternal( t.metrics.TracesJoinedSampled.Inc(1) } } else { - t.metrics.SpansNotSampled.Inc(1) + t.metrics.SpansStartedNotSampled.Inc(1) if newTrace { t.metrics.TracesStartedNotSampled.Inc(1) } else if sp.firstInProcess { diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go index 2c8931e305..72edf6dbbe 100644 --- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go +++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger @@ -121,6 +115,18 @@ func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption { } } +func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption { + return func(tracer *Tracer) { + tracer.options.gen128Bit = gen128Bit + } +} + +func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption { + return func(tracer *Tracer) { + tracer.options.highTraceIDGenerator = highTraceIDGenerator + } +} + func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption { return func(tracer *Tracer) { tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_test.go b/vendor/github.com/uber/jaeger-client-go/tracer_test.go new file mode 100644 index 0000000000..6c0ea503d1 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/tracer_test.go @@ -0,0 +1,360 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
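// Illustrative aside, not part of the vendored files: the tracer_options.go
// hunk above adds Gen128Bit and HighTraceIDGenerator via the functional
// options pattern, and NewTracer (see the tracer.go hunk) falls back to the
// tracer's own randomNumber for the high bits when 128-bit IDs are enabled
// but no custom generator is set. A minimal self-contained sketch of that
// pattern follows; every name in it is hypothetical.
package sketch

type tracerOpts struct {
	gen128Bit            bool
	highTraceIDGenerator func() uint64
}

type opt func(*tracerOpts)

// gen128Bit toggles 128-bit trace IDs, mirroring TracerOptions.Gen128Bit.
func gen128Bit(enabled bool) opt {
	return func(o *tracerOpts) { o.gen128Bit = enabled }
}

// highTraceIDGenerator overrides the source of the high 64 bits,
// mirroring TracerOptions.HighTraceIDGenerator.
func highTraceIDGenerator(gen func() uint64) opt {
	return func(o *tracerOpts) { o.highTraceIDGenerator = gen }
}

func newOpts(opts ...opt) *tracerOpts {
	o := &tracerOpts{}
	for _, apply := range opts {
		apply(o)
	}
	// Mirrors the defaulting in NewTracer: a custom high-bits generator
	// only takes effect when 128-bit trace IDs are enabled.
	if o.gen128Bit && o.highTraceIDGenerator == nil {
		o.highTraceIDGenerator = func() uint64 { return 1 } // stand-in default
	}
	return o
}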
+ +package jaeger + +import ( + "io" + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + "github.com/uber/jaeger-lib/metrics" + "github.com/uber/jaeger-lib/metrics/testutils" + + "github.com/uber/jaeger-client-go/internal/baggage" + "github.com/uber/jaeger-client-go/log" + "github.com/uber/jaeger-client-go/utils" +) + +type tracerSuite struct { + suite.Suite + tracer opentracing.Tracer + closer io.Closer + metricsFactory *metrics.LocalFactory +} + +func (s *tracerSuite) SetupTest() { + s.metricsFactory = metrics.NewLocalFactory(0) + metrics := NewMetrics(s.metricsFactory, nil) + + s.tracer, s.closer = NewTracer("DOOP", // respect the classics, man! + NewConstSampler(true), + NewNullReporter(), + TracerOptions.Metrics(metrics), + TracerOptions.ZipkinSharedRPCSpan(true), + TracerOptions.BaggageRestrictionManager(baggage.NewDefaultRestrictionManager(0)), + ) + s.NotNil(s.tracer) +} + +func (s *tracerSuite) TearDownTest() { + if s.tracer != nil { + s.closer.Close() + s.tracer = nil + } +} + +func TestTracerSuite(t *testing.T) { + suite.Run(t, new(tracerSuite)) +} + +func (s *tracerSuite) TestBeginRootSpan() { + s.metricsFactory.Clear() + startTime := time.Now() + s.tracer.(*Tracer).timeNow = func() time.Time { return startTime } + someID := uint64(12345) + s.tracer.(*Tracer).randomNumber = func() uint64 { return someID } + + sp := s.tracer.StartSpan("get_name") + ext.SpanKindRPCServer.Set(sp) + ext.PeerService.Set(sp, "peer-service") + s.NotNil(sp) + ss := sp.(*Span) + s.NotNil(ss.tracer, "Tracer must be referenced from span") + s.Equal("get_name", ss.operationName) + s.Len(ss.tags, 4, "Span should have 2 sampler tags, span.kind tag and peer.service tag") + s.EqualValues(Tag{key: "span.kind", value: ext.SpanKindRPCServerEnum}, ss.tags[2], "Span must be server-side") + s.EqualValues(Tag{key: "peer.service", value: "peer-service"}, ss.tags[3], "Client is 'peer-service'") + + s.EqualValues(someID, ss.context.traceID.Low) + s.EqualValues(0, ss.context.parentID) + + s.Equal(startTime, ss.startTime) + + sp.Finish() + s.NotNil(ss.duration) + + testutils.AssertCounterMetrics(s.T(), s.metricsFactory, []testutils.ExpectedMetric{ + {Name: "jaeger.finished_spans", Value: 1}, + {Name: "jaeger.started_spans", Tags: map[string]string{"sampled": "y"}, Value: 1}, + {Name: "jaeger.traces", Tags: map[string]string{"sampled": "y", "state": "started"}, Value: 1}, + }...) +} + +func (s *tracerSuite) TestStartRootSpanWithOptions() { + ts := time.Now() + sp := s.tracer.StartSpan("get_address", opentracing.StartTime(ts)) + ss := sp.(*Span) + s.Equal("get_address", ss.operationName) + s.Equal(ts, ss.startTime) +} + +func (s *tracerSuite) TestStartChildSpan() { + s.metricsFactory.Clear() + sp1 := s.tracer.StartSpan("get_address") + sp2 := s.tracer.StartSpan("get_street", opentracing.ChildOf(sp1.Context())) + s.Equal(sp1.(*Span).context.spanID, sp2.(*Span).context.parentID) + sp2.Finish() + s.NotNil(sp2.(*Span).duration) + sp1.Finish() + testutils.AssertCounterMetrics(s.T(), s.metricsFactory, []testutils.ExpectedMetric{ + {Name: "jaeger.started_spans", Tags: map[string]string{"sampled": "y"}, Value: 2}, + {Name: "jaeger.traces", Tags: map[string]string{"sampled": "y", "state": "started"}, Value: 1}, + {Name: "jaeger.finished_spans", Value: 2}, + }...) 
+} + +type nonJaegerSpanContext struct{} + +func (c nonJaegerSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +func (s *tracerSuite) TestStartSpanWithMultipleReferences() { + s.metricsFactory.Clear() + sp1 := s.tracer.StartSpan("A") + sp2 := s.tracer.StartSpan("B") + sp3 := s.tracer.StartSpan("C") + sp4 := s.tracer.StartSpan( + "D", + opentracing.ChildOf(sp1.Context()), + opentracing.ChildOf(sp2.Context()), + opentracing.FollowsFrom(sp3.Context()), + opentracing.FollowsFrom(nonJaegerSpanContext{}), + opentracing.FollowsFrom(SpanContext{}), // Empty span context should be excluded + ) + // Should use the first ChildOf ref span as the parent + s.Equal(sp1.(*Span).context.spanID, sp4.(*Span).context.parentID) + sp4.Finish() + s.NotNil(sp4.(*Span).duration) + sp3.Finish() + sp2.Finish() + sp1.Finish() + testutils.AssertCounterMetrics(s.T(), s.metricsFactory, []testutils.ExpectedMetric{ + {Name: "jaeger.started_spans", Tags: map[string]string{"sampled": "y"}, Value: 4}, + {Name: "jaeger.traces", Tags: map[string]string{"sampled": "y", "state": "started"}, Value: 3}, + {Name: "jaeger.finished_spans", Value: 4}, + }...) + assert.Len(s.T(), sp4.(*Span).references, 3) +} + +func (s *tracerSuite) TestStartSpanWithOnlyFollowFromReference() { + s.metricsFactory.Clear() + sp1 := s.tracer.StartSpan("A") + sp2 := s.tracer.StartSpan( + "B", + opentracing.FollowsFrom(sp1.Context()), + ) + // Should use the FollowsFrom ref span as the parent, since there are no ChildOf refs + s.Equal(sp1.(*Span).context.spanID, sp2.(*Span).context.parentID) + sp2.Finish() + s.NotNil(sp2.(*Span).duration) + sp1.Finish() + testutils.AssertCounterMetrics(s.T(), s.metricsFactory, []testutils.ExpectedMetric{ + {Name: "jaeger.started_spans", Tags: map[string]string{"sampled": "y"}, Value: 2}, + {Name: "jaeger.traces", Tags: map[string]string{"sampled": "y", "state": "started"}, Value: 1}, + {Name: "jaeger.finished_spans", Value: 2}, + }...) + assert.Len(s.T(), sp2.(*Span).references, 1) +} + +func (s *tracerSuite) TestTraceStartedOrJoinedMetrics() { + tests := []struct { + sampled bool + label string + }{ + {true, "y"}, + {false, "n"}, + } + for _, test := range tests { + s.metricsFactory.Clear() + s.tracer.(*Tracer).sampler = NewConstSampler(test.sampled) + sp1 := s.tracer.StartSpan("parent", ext.RPCServerOption(nil)) + sp2 := s.tracer.StartSpan("child1", opentracing.ChildOf(sp1.Context())) + sp3 := s.tracer.StartSpan("child2", ext.RPCServerOption(sp2.Context())) + s.Equal(sp2.(*Span).context.spanID, sp3.(*Span).context.spanID) + s.Equal(sp2.(*Span).context.parentID, sp3.(*Span).context.parentID) + sp3.Finish() + sp2.Finish() + sp1.Finish() + s.Equal(test.sampled, sp1.Context().(SpanContext).IsSampled()) + s.Equal(test.sampled, sp2.Context().(SpanContext).IsSampled()) + + testutils.AssertCounterMetrics(s.T(), s.metricsFactory, []testutils.ExpectedMetric{ + {Name: "jaeger.started_spans", Tags: map[string]string{"sampled": test.label}, Value: 3}, + {Name: "jaeger.finished_spans", Value: 3}, + {Name: "jaeger.traces", Tags: map[string]string{"sampled": test.label, "state": "started"}, Value: 1}, + {Name: "jaeger.traces", Tags: map[string]string{"sampled": test.label, "state": "joined"}, Value: 1}, + }...)
+ } +} + +func (s *tracerSuite) TestSetOperationName() { + sp1 := s.tracer.StartSpan("get_address") + sp1.SetOperationName("get_street") + s.Equal("get_street", sp1.(*Span).operationName) +} + +func (s *tracerSuite) TestSamplerEffects() { + s.tracer.(*Tracer).sampler = NewConstSampler(true) + sp := s.tracer.StartSpan("test") + flags := sp.(*Span).context.flags + s.EqualValues(flagSampled, flags&flagSampled) + + s.tracer.(*Tracer).sampler = NewConstSampler(false) + sp = s.tracer.StartSpan("test") + flags = sp.(*Span).context.flags + s.EqualValues(0, flags&flagSampled) +} + +func (s *tracerSuite) TestRandomIDNotZero() { + val := uint64(0) + s.tracer.(*Tracer).randomNumber = func() (r uint64) { + r = val + val++ + return + } + sp := s.tracer.StartSpan("get_name").(*Span) + s.EqualValues(TraceID{Low: 1}, sp.context.traceID) + + rng := utils.NewRand(0) + rng.Seed(1) // for test coverage +} + +func TestTracerOptions(t *testing.T) { + t1, e := time.Parse(time.RFC3339, "2012-11-01T22:08:41+00:00") + assert.NoError(t, e) + + timeNow := func() time.Time { + return t1 + } + rnd := func() uint64 { + return 1 + } + + openTracer, closer := NewTracer("DOOP", // respect the classics, man! + NewConstSampler(true), + NewNullReporter(), + TracerOptions.Logger(log.StdLogger), + TracerOptions.TimeNow(timeNow), + TracerOptions.RandomNumber(rnd), + TracerOptions.PoolSpans(true), + TracerOptions.Tag("tag_key", "tag_value"), + ) + defer closer.Close() + + tracer := openTracer.(*Tracer) + assert.Equal(t, log.StdLogger, tracer.logger) + assert.Equal(t, t1, tracer.timeNow()) + assert.Equal(t, uint64(1), tracer.randomNumber()) + assert.Equal(t, uint64(1), tracer.randomNumber()) + assert.Equal(t, uint64(1), tracer.randomNumber()) // always 1 + assert.Equal(t, true, tracer.options.poolSpans) + assert.Equal(t, opentracing.Tag{Key: "tag_key", Value: "tag_value"}, tracer.Tags()[0]) +} + +func TestInjectorExtractorOptions(t *testing.T) { + tracer, tc := NewTracer("x", NewConstSampler(true), NewNullReporter(), + TracerOptions.Injector("dummy", &dummyPropagator{}), + TracerOptions.Extractor("dummy", &dummyPropagator{}), + ) + defer tc.Close() + + sp := tracer.StartSpan("x") + c := &dummyCarrier{} + err := tracer.Inject(sp.Context(), "dummy", []int{}) + assert.Equal(t, opentracing.ErrInvalidCarrier, err) + err = tracer.Inject(sp.Context(), "dummy", c) + assert.NoError(t, err) + assert.True(t, c.ok) + + c.ok = false + _, err = tracer.Extract("dummy", []int{}) + assert.Equal(t, opentracing.ErrInvalidCarrier, err) + _, err = tracer.Extract("dummy", c) + assert.Equal(t, opentracing.ErrSpanContextNotFound, err) + c.ok = true + _, err = tracer.Extract("dummy", c) + assert.NoError(t, err) +} + +func TestEmptySpanContextAsParent(t *testing.T) { + tracer, tc := NewTracer("x", NewConstSampler(true), NewNullReporter()) + defer tc.Close() + + span := tracer.StartSpan("test", opentracing.ChildOf(emptyContext)) + ctx := span.Context().(SpanContext) + assert.True(t, ctx.traceID.IsValid()) + assert.True(t, ctx.IsValid()) +} + +func TestGen128Bit(t *testing.T) { + tracer, tc := NewTracer("x", NewConstSampler(true), NewNullReporter(), TracerOptions.Gen128Bit(true)) + defer tc.Close() + + span := tracer.StartSpan("test", opentracing.ChildOf(emptyContext)) + defer span.Finish() + traceID := span.Context().(SpanContext).TraceID() + assert.True(t, traceID.High != 0) + assert.True(t, traceID.Low != 0) +} + +func TestZipkinSharedRPCSpan(t *testing.T) { + tracer, tc := NewTracer("x", NewConstSampler(true), NewNullReporter(), 
TracerOptions.ZipkinSharedRPCSpan(false)) + + sp1 := tracer.StartSpan("client", ext.SpanKindRPCClient) + sp2 := tracer.StartSpan("server", opentracing.ChildOf(sp1.Context()), ext.SpanKindRPCServer) + assert.Equal(t, sp1.(*Span).context.spanID, sp2.(*Span).context.parentID) + assert.NotEqual(t, sp1.(*Span).context.spanID, sp2.(*Span).context.spanID) + sp2.Finish() + sp1.Finish() + tc.Close() + + tracer, tc = NewTracer("x", NewConstSampler(true), NewNullReporter(), TracerOptions.ZipkinSharedRPCSpan(true)) + + sp1 = tracer.StartSpan("client", ext.SpanKindRPCClient) + sp2 = tracer.StartSpan("server", opentracing.ChildOf(sp1.Context()), ext.SpanKindRPCServer) + assert.Equal(t, sp1.(*Span).context.spanID, sp2.(*Span).context.spanID) + assert.Equal(t, sp1.(*Span).context.parentID, sp2.(*Span).context.parentID) + sp2.Finish() + sp1.Finish() + tc.Close() +} + +type dummyPropagator struct{} +type dummyCarrier struct { + ok bool +} + +func (p *dummyPropagator) Inject(ctx SpanContext, carrier interface{}) error { + c, ok := carrier.(*dummyCarrier) + if !ok { + return opentracing.ErrInvalidCarrier + } + c.ok = true + return nil +} + +func (p *dummyPropagator) Extract(carrier interface{}) (SpanContext, error) { + c, ok := carrier.(*dummyCarrier) + if !ok { + return emptyContext, opentracing.ErrInvalidCarrier + } + if c.ok { + return emptyContext, nil + } + return emptyContext, opentracing.ErrSpanContextNotFound +} diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go index f02c6f3b58..c5f5b19551 100644 --- a/vendor/github.com/uber/jaeger-client-go/transport.go +++ b/vendor/github.com/uber/jaeger-client-go/transport.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go index af87a9382e..cb83cdf9b4 100644 --- a/vendor/github.com/uber/jaeger-client-go/transport_udp.go +++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp_test.go b/vendor/github.com/uber/jaeger-client-go/transport_udp_test.go new file mode 100644 index 0000000000..99ef1c22fe --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/transport_udp_test.go @@ -0,0 +1,221 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
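// Illustrative aside, not from the vendored code: the tests below size the
// UDP buffer by measuring how many bytes a span or process struct occupies
// once Thrift-encoded. This sketch shows the measurement technique under the
// assumption of the Apache Thrift Go library; any generated type that
// implements thrift.TStruct can be measured this way.
package sketch

import (
	"github.com/apache/thrift/lib/go/thrift"
)

// encodedLen returns the number of bytes a Thrift struct occupies when
// serialized with the compact protocol into an in-memory buffer.
func encodedLen(s thrift.TStruct) (int, error) {
	buf := thrift.NewTMemoryBufferLen(1024)
	proto := thrift.NewTCompactProtocolFactory().GetProtocol(buf)
	if err := s.Write(proto); err != nil {
		return 0, err
	}
	return buf.Len(), nil
}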
+ +package jaeger + +import ( + "testing" + "time" + + "github.com/apache/thrift/lib/go/thrift" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/uber/jaeger-client-go/testutils" + "github.com/uber/jaeger-client-go/thrift-gen/agent" + j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" +) + +var ( + testTracer, _ = NewTracer("svcName", NewConstSampler(false), NewNullReporter()) + jaegerTracer = testTracer.(*Tracer) +) + +func getThriftSpanByteLength(t *testing.T, span *Span) int { + jSpan := BuildJaegerThrift(span) + transport := thrift.NewTMemoryBufferLen(1000) + protocolFactory := thrift.NewTCompactProtocolFactory() + err := jSpan.Write(protocolFactory.GetProtocol(transport)) + require.NoError(t, err) + return transport.Len() +} + +func getThriftProcessByteLengthFromTracer(t *testing.T, tracer *Tracer) int { + process := buildJaegerProcessThrift(tracer) + return getThriftProcessByteLength(t, process) +} + +func getThriftProcessByteLength(t *testing.T, process *j.Process) int { + transport := thrift.NewTMemoryBufferLen(1000) + protocolFactory := thrift.NewTCompactProtocolFactory() + err := process.Write(protocolFactory.GetProtocol(transport)) + require.NoError(t, err) + return transport.Len() +} + +func TestEmitBatchOverhead(t *testing.T) { + transport := thrift.NewTMemoryBufferLen(1000) + protocolFactory := thrift.NewTCompactProtocolFactory() + client := agent.NewAgentClientFactory(transport, protocolFactory) + + span := &Span{operationName: "test-span", tracer: jaegerTracer} + spanSize := getThriftSpanByteLength(t, span) + + tests := []int{1, 2, 14, 15, 377, 500, 65000, 0xFFFF} + for i, n := range tests { + transport.Reset() + batch := make([]*j.Span, n) + processTags := make([]*j.Tag, n) + for x := 0; x < n; x++ { + batch[x] = BuildJaegerThrift(span) + processTags[x] = &j.Tag{} + } + process := &j.Process{ServiceName: "svcName", Tags: processTags} + client.SeqId = -2 // this causes the longest encoding of varint32 as 5 bytes + err := client.EmitBatch(&j.Batch{Process: process, Spans: batch}) + processSize := getThriftProcessByteLength(t, process) + require.NoError(t, err) + overhead := transport.Len() - n*spanSize - processSize + assert.True(t, overhead <= emitBatchOverhead, + "test %d, n=%d, expected overhead %d <= %d", i, n, overhead, emitBatchOverhead) + t.Logf("span count: %d, overhead: %d", n, overhead) + } +} + +func TestUDPSenderFlush(t *testing.T) { + agent, err := testutils.StartMockAgent() + require.NoError(t, err) + defer agent.Close() + + span := &Span{operationName: "test-span", tracer: jaegerTracer} + spanSize := getThriftSpanByteLength(t, span) + processSize := getThriftProcessByteLengthFromTracer(t, jaegerTracer) + + sender, err := NewUDPTransport(agent.SpanServerAddr(), 5*spanSize+processSize+emitBatchOverhead) + require.NoError(t, err) + udpSender := sender.(*udpSender) + + // test empty flush + n, err := sender.Flush() + require.NoError(t, err) + assert.Equal(t, 0, n) + + // test early flush + n, err = sender.Append(span) + require.NoError(t, err) + assert.Equal(t, 0, n, "span should be in buffer, not flushed") + buffer := udpSender.spanBuffer + require.Equal(t, 1, len(buffer), "span should be in buffer, not flushed") + assert.Equal(t, BuildJaegerThrift(span), buffer[0], "span should be in buffer, not flushed") + + n, err = sender.Flush() + require.NoError(t, err) + assert.Equal(t, 1, n) + assert.Equal(t, 0, len(udpSender.spanBuffer), "buffer should become empty") + assert.Equal(t, processSize, udpSender.byteBufferSize, 
"buffer size counter should be equal to the processSize") + assert.Nil(t, buffer[0], "buffer should not keep reference to the span") + + for i := 0; i < 10000; i++ { + batches := agent.GetJaegerBatches() + if len(batches) > 0 { + break + } + time.Sleep(1 * time.Millisecond) + } + batches := agent.GetJaegerBatches() + require.Equal(t, 1, len(batches), "agent should have received the batch") + require.Equal(t, 1, len(batches[0].Spans)) + assert.Equal(t, span.operationName, batches[0].Spans[0].OperationName) +} + +func TestUDPSenderAppend(t *testing.T) { + agent, err := testutils.StartMockAgent() + require.NoError(t, err) + defer agent.Close() + + span := &Span{operationName: "test-span", tracer: jaegerTracer} + spanSize := getThriftSpanByteLength(t, span) + processSize := getThriftProcessByteLengthFromTracer(t, jaegerTracer) + + tests := []struct { + bufferSizeOffset int + expectFlush bool + expectSpansFlushed int + expectBatchesFlushed int + manualFlush bool + expectSpansFlushed2 int + expectBatchesFlushed2 int + description string + }{ + {1, false, 0, 0, true, 5, 1, "in test: buffer bigger than 5 spans"}, + {0, true, 5, 1, false, 0, 0, "in test: buffer fits exactly 5 spans"}, + {-1, true, 4, 1, true, 1, 1, "in test: buffer smaller than 5 spans"}, + } + + for _, test := range tests { + bufferSize := 5*spanSize + test.bufferSizeOffset + processSize + emitBatchOverhead + sender, err := NewUDPTransport(agent.SpanServerAddr(), bufferSize) + require.NoError(t, err, test.description) + + agent.ResetJaegerBatches() + for i := 0; i < 5; i++ { + n, err := sender.Append(span) + require.NoError(t, err, test.description) + if i < 4 { + assert.Equal(t, 0, n, test.description) + } else { + assert.Equal(t, test.expectSpansFlushed, n, test.description) + } + } + if test.expectFlush { + time.Sleep(5 * time.Millisecond) + } + batches := agent.GetJaegerBatches() + require.Equal(t, test.expectBatchesFlushed, len(batches), test.description) + var spans []*j.Span + if test.expectBatchesFlushed > 0 { + spans = batches[0].Spans + } + require.Equal(t, test.expectSpansFlushed, len(spans), test.description) + for i := 0; i < test.expectSpansFlushed; i++ { + assert.Equal(t, span.operationName, spans[i].OperationName, test.description) + } + + if test.manualFlush { + agent.ResetJaegerBatches() + n, err := sender.Flush() + require.NoError(t, err, test.description) + assert.Equal(t, test.expectSpansFlushed2, n, test.description) + + time.Sleep(5 * time.Millisecond) + batches = agent.GetJaegerBatches() + require.Equal(t, test.expectBatchesFlushed2, len(batches), test.description) + spans = []*j.Span{} + if test.expectBatchesFlushed2 > 0 { + spans = batches[0].Spans + } + require.Equal(t, test.expectSpansFlushed2, len(spans), test.description) + for i := 0; i < test.expectSpansFlushed2; i++ { + assert.Equal(t, span.operationName, spans[i].OperationName, test.description) + } + } + + } +} + +func TestUDPSenderHugeSpan(t *testing.T) { + agent, err := testutils.StartMockAgent() + require.NoError(t, err) + defer agent.Close() + + span := &Span{operationName: "test-span", tracer: jaegerTracer} + spanSize := getThriftSpanByteLength(t, span) + + sender, err := NewUDPTransport(agent.SpanServerAddr(), spanSize/2+emitBatchOverhead) + require.NoError(t, err) + + n, err := sender.Append(span) + assert.Equal(t, errSpanTooLarge, err) + assert.Equal(t, 1, n) +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go index c534315005..237211f822 100644 --- 
a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package utils diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json_test.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json_test.go new file mode 100644 index 0000000000..6ee984a9aa --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json_test.go @@ -0,0 +1,58 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
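// Hedged sketch, an assumption rather than the vendored implementation: per
// the tests below, GetJSON(url, &out) plausibly performs an HTTP GET, treats
// transport failures and non-2xx statuses as errors, and JSON-decodes the
// response body into out. One way such a helper could look:
package sketch

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

// getJSON fetches url and unmarshals the JSON response body into out.
func getJSON(url string, out interface{}) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		// Surface the server's error body, since callers only see the error.
		body, _ := ioutil.ReadAll(resp.Body)
		return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
	}
	return json.NewDecoder(resp.Body).Decode(out)
}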
+ +package utils + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type testJSONStruct struct { + Name string + Age int +} + +func TestGetJSON(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.Write([]byte("{\"name\": \"Bender\", \"age\": 3}")) + })) + defer server.Close() + + var s testJSONStruct + err := GetJSON(server.URL, &s) + require.NoError(t, err) + + assert.Equal(t, "Bender", s.Name) + assert.Equal(t, 3, s.Age) +} + +func TestGetJSONErrors(t *testing.T) { + var s testJSONStruct + err := GetJSON("localhost:0", &s) + assert.Error(t, err) + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "some error", http.StatusInternalServerError) + })) + defer server.Close() + + err = GetJSON(server.URL, &s) + assert.Error(t, err) +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go index 5dac5ee2a5..b51af7713f 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/localip.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package utils diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go index ad67634bc7..9875f7f55c 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/rand.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. 
- -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package utils diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go index 58927586b0..1b8db97584 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package utils diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter_test.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter_test.go new file mode 100644 index 0000000000..a075afb425 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter_test.go @@ -0,0 +1,75 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestRateLimiter(t *testing.T) { + limiter := NewRateLimiter(2.0, 2.0) + // stop time + ts := time.Now() + limiter.(*rateLimiter).lastTick = ts + limiter.(*rateLimiter).timeNow = func() time.Time { + return ts + } + assert.True(t, limiter.CheckCredit(1.0)) + assert.True(t, limiter.CheckCredit(1.0)) + assert.False(t, limiter.CheckCredit(1.0)) + // move time 250ms forward, not enough credits to pay for 1.0 item + limiter.(*rateLimiter).timeNow = func() time.Time { + return ts.Add(time.Second / 4) + } + assert.False(t, limiter.CheckCredit(1.0)) + // move time 500ms forward, now enough credits to pay for 1.0 item + limiter.(*rateLimiter).timeNow = func() time.Time { + return ts.Add(time.Second/4 + time.Second/2) + } + assert.True(t, limiter.CheckCredit(1.0)) + assert.False(t, limiter.CheckCredit(1.0)) + // move time 5s forward, enough to accumulate credits for 10 messages, but it should still be capped at 2 + limiter.(*rateLimiter).lastTick = ts + limiter.(*rateLimiter).timeNow = func() time.Time { + return ts.Add(5 * time.Second) + } + assert.True(t, limiter.CheckCredit(1.0)) + assert.True(t, limiter.CheckCredit(1.0)) + assert.False(t, limiter.CheckCredit(1.0)) + assert.False(t, limiter.CheckCredit(1.0)) + assert.False(t, limiter.CheckCredit(1.0)) +} + +func TestMaxBalance(t *testing.T) { + limiter := NewRateLimiter(0.1, 1.0) + // stop time + ts := time.Now() + limiter.(*rateLimiter).lastTick = ts + limiter.(*rateLimiter).timeNow = func() time.Time { + return ts + } + // on initialization, should have enough credits for 1 message + assert.True(t, limiter.CheckCredit(1.0)) + + // move time 20s forward, enough to accumulate credits for 2 messages, but it should still be capped at 1 + limiter.(*rateLimiter).timeNow = func() time.Time { + return ts.Add(time.Second * 20) + } + assert.True(t, limiter.CheckCredit(1.0)) + assert.False(t, limiter.CheckCredit(1.0)) +} diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go index b1de37bef3..2374686e55 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go @@ 
-1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package utils diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go index 79eaa4e21c..ac3c325d1e 100644 --- a/vendor/github.com/uber/jaeger-client-go/utils/utils.go +++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. - -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. 
+// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package utils diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils_test.go b/vendor/github.com/uber/jaeger-client-go/utils/utils_test.go new file mode 100644 index 0000000000..1df8e0b351 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/utils/utils_test.go @@ -0,0 +1,91 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetLocalIP(t *testing.T) { + ip, _ := HostIP() + assert.NotNil(t, ip, "assert we have an ip") +} + +func TestParseIPToUint32(t *testing.T) { + tests := []struct { + in string + out uint32 + err error + }{ + {"1.2.3.4", 1<<24 | 2<<16 | 3<<8 | 4, nil}, + {"127.0.0.1", 127<<24 | 1, nil}, + {"localhost", 127<<24 | 1, nil}, + {"127.xxx.0.1", 0, nil}, + {"", 0, ErrEmptyIP}, + {"hostname", 0, ErrNotFourOctets}, + } + + for _, test := range tests { + intIP, err := ParseIPToUint32(test.in) + if test.err != nil { + assert.Equal(t, test.err, err) + } else { + assert.Equal(t, test.out, intIP) + } + + } +} + +func TestParsePort(t *testing.T) { + tests := []struct { + in string + out uint16 + err bool + }{ + {"123", 123, false}, + {"77777", 0, true}, // too large for 16bit + {"bad-wolf", 0, true}, + } + for _, test := range tests { + p, err := ParsePort(test.in) + if test.err { + assert.Error(t, err) + } else { + assert.Equal(t, test.out, p) + } + } +} + +func TestPackIPAsUint32(t *testing.T) { + ipv6a := net.IP{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 255, 255, 1, 2, 3, 4} + ipv6b := net.ParseIP("2001:0db8:85a3:0000:0000:8a2e:0370:7334") + assert.NotNil(t, ipv6a) + + tests := []struct { + in net.IP + out uint32 + }{ + {net.IPv4(1, 2, 3, 4), 1<<24 | 2<<16 | 3<<8 | 4}, + {ipv6a, 1<<24 | 2<<16 | 3<<8 | 4}, // IPv6 but convertible to IPv4 + {ipv6b, 0}, + } + for _, test := range tests { + ip := PackIPAsUint32(test.in) + assert.Equal(t, test.out, ip) + } +} diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go index 3667c9fbc6..636952b7f1 100644 --- a/vendor/github.com/uber/jaeger-client-go/zipkin.go +++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. 
// -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_test.go b/vendor/github.com/uber/jaeger-client-go/zipkin_test.go new file mode 100644 index 0000000000..2d1d464e64 --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/zipkin_test.go @@ -0,0 +1,68 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
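// Illustrative aside, not the vendored API: TestInjectorExtractorOptions
// above registers an Injector/Extractor pair for a custom format, with the
// same Inject/Extract shape as dummyPropagator. This sketch shows a
// map-backed text carrier in that shape; the carrier type, header key, and
// the string stand-in for SpanContext are assumptions made for brevity.
package sketch

import "errors"

var (
	errInvalidCarrier     = errors.New("invalid carrier")
	errSpanContextMissing = errors.New("span context not found")
)

type spanContext string // stand-in for the real SpanContext type

type textCarrier map[string]string

type textPropagator struct{}

// Inject writes the span context into the carrier under a single key.
func (textPropagator) Inject(ctx spanContext, carrier interface{}) error {
	c, ok := carrier.(textCarrier)
	if !ok {
		return errInvalidCarrier
	}
	c["uber-trace-id"] = string(ctx)
	return nil
}

// Extract reads the span context back out, distinguishing a bad carrier
// from a carrier that simply holds no context.
func (textPropagator) Extract(carrier interface{}) (spanContext, error) {
	c, ok := carrier.(textCarrier)
	if !ok {
		return "", errInvalidCarrier
	}
	v, ok := c["uber-trace-id"]
	if !ok {
		return "", errSpanContextMissing
	}
	return spanContext(v), nil
}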
+ +package jaeger + +import ( + "testing" + + "github.com/opentracing/opentracing-go/ext" + "github.com/stretchr/testify/assert" +) + +func TestZipkinPropagator(t *testing.T) { + tracer, tCloser := NewTracer("x", NewConstSampler(true), NewNullReporter(), TracerOptions.ZipkinSharedRPCSpan(true)) + defer tCloser.Close() + + carrier := &TestZipkinSpan{} + sp := tracer.StartSpan("y") + + // Note: we intentionally use string as format, as that's what TChannel would need to do + if err := tracer.Inject(sp.Context(), "zipkin-span-format", carrier); err != nil { + t.Fatalf("Inject failed: %+v", err) + } + sp1 := sp.(*Span) + assert.Equal(t, sp1.context.traceID, TraceID{Low: carrier.traceID}) + assert.Equal(t, sp1.context.spanID, SpanID(carrier.spanID)) + assert.Equal(t, sp1.context.parentID, SpanID(carrier.parentID)) + assert.Equal(t, sp1.context.flags, carrier.flags) + + sp2ctx, err := tracer.Extract("zipkin-span-format", carrier) + if err != nil { + t.Fatalf("Extract failed: %+v", err) + } + sp2 := tracer.StartSpan("x", ext.RPCServerOption(sp2ctx)) + sp3 := sp2.(*Span) + assert.Equal(t, sp1.context.traceID, sp3.context.traceID) + assert.Equal(t, sp1.context.spanID, sp3.context.spanID) + assert.Equal(t, sp1.context.parentID, sp3.context.parentID) + assert.Equal(t, sp1.context.flags, sp3.context.flags) +} + +// TestZipkinSpan is a mock-up of TChannel's internal Span struct +type TestZipkinSpan struct { + traceID uint64 + parentID uint64 + spanID uint64 + flags byte +} + +func (s TestZipkinSpan) TraceID() uint64 { return s.traceID } +func (s TestZipkinSpan) ParentID() uint64 { return s.parentID } +func (s TestZipkinSpan) SpanID() uint64 { return s.spanID } +func (s TestZipkinSpan) Flags() byte { return s.flags } +func (s *TestZipkinSpan) SetTraceID(traceID uint64) { s.traceID = traceID } +func (s *TestZipkinSpan) SetSpanID(spanID uint64) { s.spanID = spanID } +func (s *TestZipkinSpan) SetParentID(parentID uint64) { s.parentID = parentID } +func (s *TestZipkinSpan) SetFlags(flags byte) { s.flags = flags } diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go index c85b608e1c..b2e9a3e647 100644 --- a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go +++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go @@ -1,22 +1,16 @@ -// Copyright (c) 2016 Uber Technologies, Inc. +// Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package jaeger diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span_test.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span_test.go new file mode 100644 index 0000000000..d14b4bee5b --- /dev/null +++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span_test.go @@ -0,0 +1,329 @@ +// Copyright (c) 2017 Uber Technologies, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jaeger + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/uber/jaeger-client-go/thrift-gen/zipkincore" + "github.com/uber/jaeger-client-go/utils" +) + +func TestThriftFirstInProcessSpan(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + sp1 := tracer.StartSpan("s1").(*Span) + sp2 := tracer.StartSpan("sp2", opentracing.ChildOf(sp1.Context())).(*Span) + sp2.Finish() + sp1.Finish() + + tests := []struct { + span *Span + wantTags bool + }{ + {sp1, true}, + {sp2, false}, + } + + for _, test := range tests { + var check func(assert.TestingT, interface{}, ...interface{}) bool + if test.wantTags { + check = assert.NotNil + } else { + check = assert.Nil + } + thriftSpan := BuildZipkinThrift(test.span) + version := findBinaryAnnotation(thriftSpan, JaegerClientVersionTagKey) + hostname := findBinaryAnnotation(thriftSpan, TracerHostnameTagKey) + check(t, version) + check(t, hostname) + } +} + +func TestThriftForceSampled(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(false), // sample nothing + NewNullReporter()) + defer closer.Close() + + sp := tracer.StartSpan("s1").(*Span) + ext.SamplingPriority.Set(sp, 1) + assert.True(t, sp.context.IsSampled()) + assert.True(t, sp.context.IsDebug()) + thriftSpan := BuildZipkinThrift(sp) + assert.True(t, thriftSpan.Debug) +} + +func TestThriftSpanLogs(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + root := tracer.StartSpan("s1") + + someTime := time.Now().Add(-time.Minute) + someTimeInt64 := utils.TimeToMicrosecondsSinceEpochInt64(someTime) + + fields := func(fields ...log.Field) []log.Field { + return fields + } + tests := []struct { + fields 
[]log.Field + logFunc func(sp opentracing.Span) + expected string + expectedTimestamp int64 + disableSampling bool + }{ + {fields: fields(log.String("event", "happened")), expected: "happened"}, + {fields: fields(log.String("something", "happened")), expected: `{"something":"happened"}`}, + {fields: fields(log.Bool("something", true)), expected: `{"something":"true"}`}, + {fields: fields(log.Int("something", 123)), expected: `{"something":"123"}`}, + {fields: fields(log.Int32("something", 123)), expected: `{"something":"123"}`}, + {fields: fields(log.Int64("something", 123)), expected: `{"something":"123"}`}, + {fields: fields(log.Uint32("something", 123)), expected: `{"something":"123"}`}, + {fields: fields(log.Uint64("something", 123)), expected: `{"something":"123"}`}, + {fields: fields(log.Float32("something", 123)), expected: `{"something":"123.000000"}`}, + {fields: fields(log.Float64("something", 123)), expected: `{"something":"123.000000"}`}, + {fields: fields(log.Error(errors.New("drugs are baaad, m-k"))), + expected: `{"error":"drugs are baaad, m-k"}`}, + {fields: fields(log.Object("something", 123)), expected: `{"something":"123"}`}, + { + fields: fields(log.Lazy(func(fv log.Encoder) { + fv.EmitBool("something", true) + })), + expected: `{"something":"true"}`, + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogKV("event", "something") + }, + expected: "something", + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogKV("non-even number of arguments") + }, + // this is a bit fragile, but ¯\_(ツ)_/¯ + expected: `{"error":"non-even keyValues len: 1","function":"LogKV"}`, + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogEvent("something") + }, + expected: "something", + }, + { + logFunc: func(sp opentracing.Span) { + sp.LogEventWithPayload("something", "payload") + }, + expected: `{"event":"something","payload":"payload"}`, + }, + { + logFunc: func(sp opentracing.Span) { + sp.Log(opentracing.LogData{Event: "something"}) + }, + expected: "something", + }, + { + logFunc: func(sp opentracing.Span) { + sp.Log(opentracing.LogData{Event: "something", Payload: "payload"}) + }, + expected: `{"event":"something","payload":"payload"}`, + }, + { + logFunc: func(sp opentracing.Span) { + sp.FinishWithOptions(opentracing.FinishOptions{ + LogRecords: []opentracing.LogRecord{ + { + Timestamp: someTime, + Fields: fields(log.String("event", "happened")), + }, + }, + }) + }, + expected: "happened", + expectedTimestamp: someTimeInt64, + }, + { + logFunc: func(sp opentracing.Span) { + sp.FinishWithOptions(opentracing.FinishOptions{ + BulkLogData: []opentracing.LogData{ + { + Timestamp: someTime, + Event: "happened", + }, + }, + }) + }, + expected: "happened", + expectedTimestamp: someTimeInt64, + }, + { + logFunc: func(sp opentracing.Span) { + sp.FinishWithOptions(opentracing.FinishOptions{ + BulkLogData: []opentracing.LogData{ + { + Timestamp: someTime, + Event: "happened", + Payload: "payload", + }, + }, + }) + }, + expected: `{"event":"happened","payload":"payload"}`, + expectedTimestamp: someTimeInt64, + }, + { + disableSampling: true, + fields: fields(log.String("event", "happened")), + expected: "", + }, + { + disableSampling: true, + logFunc: func(sp opentracing.Span) { + sp.LogKV("event", "something") + }, + expected: "", + }, + } + + for i, test := range tests { + testName := fmt.Sprintf("test-%02d", i) + sp := tracer.StartSpan(testName, opentracing.ChildOf(root.Context())) + if test.disableSampling { + ext.SamplingPriority.Set(sp, 0) + } + if test.logFunc != nil { + 
test.logFunc(sp) + } else if len(test.fields) > 0 { + sp.LogFields(test.fields...) + } + thriftSpan := BuildZipkinThrift(sp.(*Span)) + if test.disableSampling { + assert.Equal(t, 0, len(thriftSpan.Annotations), testName) + continue + } + assert.Equal(t, 1, len(thriftSpan.Annotations), testName) + assert.Equal(t, test.expected, thriftSpan.Annotations[0].Value, testName) + if test.expectedTimestamp != 0 { + assert.Equal(t, test.expectedTimestamp, thriftSpan.Annotations[0].Timestamp, testName) + } + } +} + +func TestThriftLocalComponentSpan(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + tests := []struct { + addComponentTag bool + wantAnnotation string + }{ + {false, "DOOP"}, // Without COMPONENT tag the value is the service name + {true, "c1"}, + } + + for _, test := range tests { + sp := tracer.StartSpan("s1").(*Span) + if test.addComponentTag { + ext.Component.Set(sp, "c1") + } + sp.Finish() + thriftSpan := BuildZipkinThrift(sp) + + anno := findBinaryAnnotation(thriftSpan, "lc") + require.NotNil(t, anno) + assert.EqualValues(t, test.wantAnnotation, anno.Value) + } +} + +func TestSpecialTags(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + sp := tracer.StartSpan("s1").(*Span) + ext.SpanKindRPCServer.Set(sp) + ext.PeerService.Set(sp, "peer") + ext.PeerPort.Set(sp, 80) + ext.PeerHostIPv4.Set(sp, 2130706433) + sp.Finish() + + thriftSpan := BuildZipkinThrift(sp) + // Special tags should not be copied over to binary annotations + assert.Nil(t, findBinaryAnnotation(thriftSpan, "span.kind")) + assert.Nil(t, findBinaryAnnotation(thriftSpan, "peer.service")) + assert.Nil(t, findBinaryAnnotation(thriftSpan, "peer.port")) + assert.Nil(t, findBinaryAnnotation(thriftSpan, "peer.ipv4")) + assert.Nil(t, findBinaryAnnotation(thriftSpan, "ip")) + + anno := findBinaryAnnotation(thriftSpan, "ca") + assert.NotNil(t, anno) + assert.NotNil(t, anno.Host) + assert.EqualValues(t, 80, anno.Host.Port) + assert.EqualValues(t, 2130706433, anno.Host.Ipv4) + assert.EqualValues(t, "peer", anno.Host.ServiceName) + + assert.NotNil(t, findAnnotation(thriftSpan, "sr")) + assert.NotNil(t, findAnnotation(thriftSpan, "ss")) +} + +func TestBaggageLogs(t *testing.T) { + tracer, closer := NewTracer("DOOP", + NewConstSampler(true), + NewNullReporter()) + defer closer.Close() + + sp := tracer.StartSpan("s1").(*Span) + sp.SetBaggageItem("auth.token", "token") + ext.SpanKindRPCServer.Set(sp) + sp.Finish() + + thriftSpan := BuildZipkinThrift(sp) + assert.NotNil(t, findAnnotation(thriftSpan, `{"event":"baggage","key":"auth.token","value":"token"}`)) +} + +func findAnnotation(span *zipkincore.Span, name string) *zipkincore.Annotation { + for _, a := range span.Annotations { + if a.Value == name { + return a + } + } + return nil +} + +func findBinaryAnnotation(span *zipkincore.Span, name string) *zipkincore.BinaryAnnotation { + for _, a := range span.BinaryAnnotations { + if a.Key == name { + return a + } + } + return nil +} diff --git a/vendor/github.com/uber/jaeger-lib/.gitignore b/vendor/github.com/uber/jaeger-lib/.gitignore new file mode 100644 index 0000000000..9cc7238332 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/.gitignore @@ -0,0 +1,11 @@ +*.out +*.test +*.xml +*.swp +.idea/ +.tmp/ +*.iml +*.cov +*.html +*.log +vendor/ diff --git a/vendor/github.com/uber/jaeger-lib/.travis.yml b/vendor/github.com/uber/jaeger-lib/.travis.yml new file mode 100644 index 
0000000000..3a1d049b8d --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/.travis.yml @@ -0,0 +1,21 @@ +sudo: required + +services: + - docker + +language: go +go_import_path: github.com/uber/jaeger-lib + +go: + - 1.7 + +env: + global: + - GO15VENDOREXPERIMENT=1 + +install: + - make install-ci + +script: + - make test-ci + - travis_retry goveralls -coverprofile=cover.out -service=travis-ci || true diff --git a/vendor/github.com/uber/jaeger-lib/CHANGELOG.md b/vendor/github.com/uber/jaeger-lib/CHANGELOG.md new file mode 100644 index 0000000000..0510467158 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/CHANGELOG.md @@ -0,0 +1,25 @@ +Changes by Version +================== + +1.2.1 (unreleased) +------------------ + +- *breaking* Change prometheus.New() to accept options instead of fixed arguments + + +1.2.0 (2017-11-12) +------------------ + +- Support Prometheus metrics directly [#29](https://github.com/jaegertracing/jaeger-lib/pull/29). + + +1.1.0 (2017-09-10) +------------------ + +- Re-releasing the project under Apache 2.0 license. + + +1.0.0 (2017-08-22) +------------------ + +- First semver release. diff --git a/vendor/github.com/uber/jaeger-lib/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-lib/CONTRIBUTING.md new file mode 100644 index 0000000000..8fa7daa05f --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/CONTRIBUTING.md @@ -0,0 +1,163 @@ +# How to Contribute to `jaeger-lib` + +We'd love your help! + +Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub +pull requests. This document outlines some of the conventions on development +workflow, commit message formatting, contact points and other resources to make +it easier to get your contribution accepted. + +We gratefully welcome improvements to documentation as well as to code. + +# Certificate of Origin + +By contributing to this project you agree to the [Developer Certificate of +Origin](https://developercertificate.org/) (DCO). This document was created +by the Linux Kernel community and is a simple statement that you, as a +contributor, have the legal right to make the contribution. See the [DCO](DCO) +file for details. + +## Getting Started + +This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies. + +To get started, make sure you clone the Git repository into the correct location +`github.com/uber/jaeger-lib` relative to `$GOPATH`: + +``` +mkdir -p $GOPATH/src/github.com/uber +cd $GOPATH/src/github.com/uber +git clone git@github.com:jaegertracing/jaeger-lib.git jaeger-lib +cd jaeger-lib +``` + +Then install dependencies and run the tests: + +``` +git submodule update --init --recursive +glide install +make test +``` + +## Imports grouping + +This projects follows the following pattern for grouping imports in Go files: + * imports from standard library + * imports from other projects + * imports from this `jaeger-lib` project + +For example: + +```go +import ( + "fmt" + + "go.uber.org/zap" + + "github.com/uber/jaeger-lib/metrics" +) +``` + +## Making A Change + +*Before making any significant changes, please [open an +issue](https://github.com/uber/jaeger-lib/issues).* Discussing your proposed +changes ahead of time will make the contribution process smooth for everyone. + +Once we've discussed your changes and you've got your code ready, make sure +that tests are passing (`make test` or `make cover`) and open your PR. Your +pull request is most likely to be accepted if it: + +* Includes tests for new functionality. 
+* Follows the guidelines in [Effective + Go](https://golang.org/doc/effective_go.html) and the [Go team's common code + review comments](https://github.com/golang/go/wiki/CodeReviewComments). +* Has a [good commit + message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html). +* Each commit must be signed by the author ([see below](#sign-your-work)). + +## License + +By contributing your code, you agree to license your contribution under the terms +of the [Apache License](LICENSE). + +If you are adding a new file it should have a header like below. The easiest +way to add such header is to run `make fmt`. + +``` +// Copyright (c) 2017 The Jaeger Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +``` + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the +patch, which certifies that you wrote it or otherwise have the right to +pass it on as an open-source patch. The rules are pretty simple: if you +can certify the below (from +[developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +using your real name (sorry, no pseudonyms or anonymous contributions.) + +You can add the sign off when creating the git commit via `git commit -s`. 
+ +If you want this to be automatic you can set up some aliases: + +``` +git config --add alias.amend "commit -s --amend" +git config --add alias.c "commit -s" +``` diff --git a/vendor/github.com/uber/jaeger-lib/DCO b/vendor/github.com/uber/jaeger-lib/DCO new file mode 100644 index 0000000000..068953d4bd --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/DCO @@ -0,0 +1,37 @@ +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. + diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE index 546ac3c9e5..261eeb9e9f 100644 --- a/vendor/github.com/uber/jaeger-lib/LICENSE +++ b/vendor/github.com/uber/jaeger-lib/LICENSE @@ -1,21 +1,201 @@ -The MIT License (MIT) - -Copyright (c) 2016 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/uber/jaeger-lib/Makefile b/vendor/github.com/uber/jaeger-lib/Makefile new file mode 100644 index 0000000000..5334d7e5b2 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/Makefile @@ -0,0 +1,84 @@ +PROJECT_ROOT=github.com/uber/jaeger-lib +PACKAGES := $(shell glide novendor | grep -v ./thrift-gen/...) +# all .go files that don't exist in hidden directories +ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen \ + -e ".*/\..*" \ + -e ".*/_.*" \ + -e ".*/mocks.*") + +export GO15VENDOREXPERIMENT=1 + +GOTEST=go test -v $(RACE) +GOLINT=golint +GOVET=go vet +GOFMT=gofmt +FMT_LOG=fmt.log +LINT_LOG=lint.log + +THRIFT_VER=0.9.3 +THRIFT_IMG=thrift:$(THRIFT_VER) +THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift +THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift" +THRIFT_GEN_DIR=thrift-gen + +PASS=$(shell printf "\033[32mPASS\033[0m") +FAIL=$(shell printf "\033[31mFAIL\033[0m") +COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/'' + +.DEFAULT_GOAL := test-and-lint + +.PHONY: test-and-lint +test-and-lint: test fmt lint + +.PHONY: test +test: + $(GOTEST) $(PACKAGES) | $(COLORIZE) + +.PHONY: fmt +fmt: + $(GOFMT) -e -s -l -w $(ALL_SRC) + ./scripts/updateLicenses.sh + +.PHONY: lint +lint: + $(GOVET) $(PACKAGES) + @cat /dev/null > $(LINT_LOG) + @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) + @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) + @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) + @./scripts/updateLicenses.sh >> $(FMT_LOG) + @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) + + +.PHONY: install +install: + glide --version || go get github.com/Masterminds/glide + glide install + + +.PHONY: cover +cover: + ./scripts/cover.sh $(shell go list $(PACKAGES)) + go tool cover -html=cover.out -o cover.html + + +idl-submodule: + git submodule init + git submodule update + +thrift-image: + $(THRIFT) -version + +.PHONY: install-ci +install-ci: install + go get github.com/wadey/gocovmerge + go get github.com/mattn/goveralls + go get golang.org/x/tools/cmd/cover + go get github.com/golang/lint/golint + + +.PHONY: test-ci +test-ci: + ./scripts/cover.sh $(shell go list $(PACKAGES)) + make lint + diff --git a/vendor/github.com/uber/jaeger-lib/README.md b/vendor/github.com/uber/jaeger-lib/README.md new file mode 100644 index 0000000000..1600e7905d --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/README.md @@ -0,0 +1,22 @@ +[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] + + +# jaeger-lib + +A collection of shared infrastructure libraries used by different +components of [Jaeger](https://github.com/uber/jaeger) backend and [jaeger-client-go](https://github.com/uber/jaeger-client-go). +This library is *not intended to be used standalone*, and provides *no guarantees of backwards compatibility*. + +The library's import path is `github.com/uber/jaeger-lib`. + +## How to Contribute + +Please see [CONTRIBUTING.md](CONTRIBUTING.md). 
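+## Example
+
+A minimal sketch of the `metrics` package as the library's own tests
+exercise it (`NewLocalFactory`, `Counter`, `Gauge`, `Timer`, `Snapshot`);
+illustrative only, not authoritative upstream documentation:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/uber/jaeger-lib/metrics"
+)
+
+func main() {
+	// in-memory metrics; refresh interval 0, as in the library's own tests
+	f := metrics.NewLocalFactory(0)
+	defer f.Stop()
+
+	f.Counter("requests", map[string]string{"code": "200"}).Inc(1)
+	f.Gauge("queue-depth", nil).Update(42)
+	f.Timer("latency", nil).Record(150 * time.Millisecond)
+
+	counters, gauges := f.Snapshot()
+	fmt.Println(counters, gauges)
+}
+```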
+ +[doc-img]: https://godoc.org/github.com/uber/jaeger-lib?status.svg +[doc]: https://godoc.org/github.com/uber/jaeger-lib +[ci-img]: https://travis-ci.org/jaegertracing/jaeger-lib.svg?branch=master +[ci]: https://travis-ci.org/jaegertracing/jaeger-lib +[cov-img]: https://coveralls.io/repos/jaegertracing/jaeger-lib/badge.svg?branch=master&service=github +[cov]: https://coveralls.io/github/jaegertracing/jaeger-lib?branch=master + diff --git a/vendor/github.com/uber/jaeger-lib/glide.lock b/vendor/github.com/uber/jaeger-lib/glide.lock new file mode 100644 index 0000000000..3974d38c71 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/glide.lock @@ -0,0 +1,78 @@ +hash: 8ca2ebd4305a4aaead18ee8cc5a84da42e95c202d53c1bd78d11436aeb8be8e4 +updated: 2017-09-20T01:48:48.588894144+02:00 +imports: +- name: github.com/beorn7/perks + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 + subpackages: + - quantile +- name: github.com/codahale/hdrhistogram + version: f8ad88b59a584afeee9d334eff879b104439117b +- name: github.com/davecgh/go-spew + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 + subpackages: + - spew +- name: github.com/facebookgo/clock + version: 600d898af40aa09a7a93ecb9265d87b0504b6f03 +- name: github.com/go-kit/kit + version: a9ca6725cbbea455e61c6bc8a1ed28e81eb3493b + subpackages: + - log + - log/level + - metrics + - metrics/expvar + - metrics/generic + - metrics/influx + - metrics/internal/lv + - metrics/prometheus +- name: github.com/go-logfmt/logfmt + version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 +- name: github.com/go-stack/stack + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf +- name: github.com/golang/protobuf + version: 7cc19b78d562895b13596ddce7aafb59dd789318 + subpackages: + - proto +- name: github.com/influxdata/influxdb + version: f3f30726d822c4be8cd00137ba66b6e4fd68cca1 + subpackages: + - client/v2 + - models + - pkg/escape +- name: github.com/kr/logfmt + version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 +- name: github.com/matttproud/golang_protobuf_extensions + version: c12348ce28de40eed0136aa2b644d0ee0650e56c + subpackages: + - pbutil +- name: github.com/pmezard/go-difflib + version: d8ed2627bdf02c080bf22230dbb337003b7aba2d + subpackages: + - difflib +- name: github.com/prometheus/client_golang + version: c5b7fccd204277076155f10851dad72b76a49317 + subpackages: + - prometheus +- name: github.com/prometheus/client_model + version: 6f3806018612930941127f2a7c6c453ba2c527d2 + subpackages: + - go +- name: github.com/prometheus/common + version: 49fee292b27bfff7f354ee0f64e1bc4850462edf + subpackages: + - expfmt + - internal/bitbucket.org/ww/goautoneg + - model +- name: github.com/prometheus/procfs + version: a1dba9ce8baed984a2495b658c82687f8157b98f + subpackages: + - xfs +- name: github.com/stretchr/testify + version: 05e8a0eda380579888eb53c394909df027f06991 + subpackages: + - assert + - require +- name: github.com/uber-go/tally + version: be9e53c77349ae2dd4b8c03a6dc20ed9a88b9927 +- name: github.com/VividCortex/gohistogram + version: 51564d9861991fb0ad0f531c99ef602d0f9866e6 +testImports: [] diff --git a/vendor/github.com/uber/jaeger-lib/glide.yaml b/vendor/github.com/uber/jaeger-lib/glide.yaml new file mode 100644 index 0000000000..e134d1de07 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/glide.yaml @@ -0,0 +1,13 @@ +package: github.com/uber/jaeger-lib +import: +- package: github.com/codahale/hdrhistogram +- package: github.com/go-kit/kit + version: v0.5.0 + subpackages: + - metrics/influx +- package: github.com/uber-go/tally + version: '>= 2.1.0, < 4' +- package: 
github.com/prometheus/client_golang + version: v0.8.0 +testImport: +- package: github.com/stretchr/testify diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go index 4ba21fe21c..2a6a43efdb 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/counter.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package metrics diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go index f59d9d2a55..a744a890df 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/factory.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package metrics diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go index 4a2c1fdbe8..3c606391a0 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package metrics diff --git a/vendor/github.com/uber/jaeger-lib/metrics/local.go b/vendor/github.com/uber/jaeger-lib/metrics/local.go index 1843186ad8..8c3624849c 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/local.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/local.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package metrics diff --git a/vendor/github.com/uber/jaeger-lib/metrics/local_test.go b/vendor/github.com/uber/jaeger-lib/metrics/local_test.go new file mode 100644 index 0000000000..f4dede6b09 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/local_test.go @@ -0,0 +1,116 @@ +package metrics + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestLocalMetrics(t *testing.T) { + tags := map[string]string{ + "x": "y", + } + + f := NewLocalFactory(0) + defer f.Stop() + f.Counter("my-counter", tags).Inc(4) + f.Counter("my-counter", tags).Inc(6) + f.Counter("my-counter", nil).Inc(6) + f.Counter("other-counter", nil).Inc(8) + f.Gauge("my-gauge", nil).Update(25) + f.Gauge("my-gauge", nil).Update(43) + f.Gauge("other-gauge", nil).Update(74) + f.Namespace("namespace", tags).Counter("my-counter", nil).Inc(7) + + timings := map[string][]time.Duration{ + "foo-latency": { + time.Second * 35, + time.Second * 6, + time.Millisecond * 576, + time.Second * 12, + }, + "bar-latency": { + time.Minute*4 + time.Second*34, + time.Minute*7 + time.Second*12, + time.Second * 625, + time.Second * 12, + }, + } + + for metric, timing := range timings { + for _, d := range timing { + f.Timer(metric, nil).Record(d) + } + } + + c, g := f.Snapshot() + require.NotNil(t, c) + require.NotNil(t, g) + + assert.Equal(t, map[string]int64{ + "my-counter|x=y": 10, + "my-counter": 6, + "other-counter": 8, + "namespace.my-counter|x=y": 7, + }, c) + + assert.Equal(t, map[string]int64{ + "bar-latency.P50": 278527, + "bar-latency.P75": 278527, + "bar-latency.P90": 442367, + "bar-latency.P95": 442367, + "bar-latency.P99": 442367, + "bar-latency.P999": 442367, + "foo-latency.P50": 6143, + "foo-latency.P75": 12287, + "foo-latency.P90": 36863, + "foo-latency.P95": 36863, + "foo-latency.P99": 36863, + "foo-latency.P999": 36863, + "my-gauge": 43, + "other-gauge": 74, + }, g) + + f.Clear() + c, g = f.Snapshot() + require.Empty(t, c) + require.Empty(t, g) +} + +func TestLocalMetricsInterval(t *testing.T) { + refreshInterval := time.Millisecond + const relativeCheckFrequency = 5 // check 5 times per refreshInterval + const maxChecks = 2 * relativeCheckFrequency + checkInterval := (refreshInterval * relativeCheckFrequency) / maxChecks + + f := NewLocalFactory(refreshInterval) + defer f.Stop() + + f.Timer("timer", nil).Record(1) + + f.tm.Lock() + timer := f.timers["timer"] + f.tm.Unlock() + assert.NotNil(t, timer) + + // timer.hist.Current 
is modified on every Rotate(), which is called by LocalBackend after every refreshInterval + getCurr := func() interface{} { + timer.Lock() + defer timer.Unlock() + return timer.hist.Current + } + + curr := getCurr() + + // wait for twice as long as the refresh interval + for i := 0; i < maxChecks; i++ { + time.Sleep(checkInterval) + + if getCurr() != curr { + return + } + } + t.Fail() +} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go index d29d72f204..0b97707b07 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package metrics diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics_test.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics_test.go new file mode 100644 index 0000000000..7d9226c664 --- /dev/null +++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics_test.go @@ -0,0 +1,89 @@ +package metrics + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestInitMetrics(t *testing.T) { + testMetrics := struct { + Gauge Gauge `metric:"gauge" tags:"1=one,2=two"` + Counter Counter `metric:"counter"` + Timer Timer `metric:"timer"` + }{} + + f := NewLocalFactory(0) + defer f.Stop() + + globalTags := map[string]string{"key": "value"} + + err := initMetrics(&testMetrics, f, globalTags) + assert.NoError(t, err) + + testMetrics.Gauge.Update(10) + testMetrics.Counter.Inc(5) + testMetrics.Timer.Record(time.Duration(time.Second * 35)) + + // wait for metrics + for i := 0; i < 1000; i++ { + c, _ := f.Snapshot() + if _, ok := c["counter"]; ok { + break + } + time.Sleep(1 * time.Millisecond) + } + + c, g := f.Snapshot() + + assert.EqualValues(t, 5, c["counter|key=value"]) + assert.EqualValues(t, 10, g["gauge|1=one|2=two|key=value"]) + assert.EqualValues(t, 36863, g["timer|key=value.P50"]) + + stopwatch := StartStopwatch(testMetrics.Timer) + stopwatch.Stop() + assert.True(t, 0 < stopwatch.ElapsedTime()) +} + +var ( + noMetricTag = struct { + NoMetricTag Counter + }{} + + badTags = struct { + BadTags Counter `metric:"counter" tags:"1=one,noValue"` + }{} + + invalidMetricType = struct { + InvalidMetricType int64 `metric:"counter"` + }{} +) + +func TestInitMetricsFailures(t *testing.T) { + assert.EqualError(t, initMetrics(&noMetricTag, nil, nil), "Field NoMetricTag is missing a tag 'metric'") + + assert.EqualError(t, initMetrics(&badTags, nil, nil), + "Field [BadTags]: Tag [noValue] is not of the form key=value in 'tags' string [1=one,noValue]") + + assert.EqualError(t, initMetrics(&invalidMetricType, nil, nil), + "Field InvalidMetricType is not a pointer to timer, gauge, or counter") +} + +func TestInitPanic(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Error("The code did not panic") + } + }() + + Init(&noMetricTag, NullFactory, nil) +} + +func TestNullMetrics(t *testing.T) { + // This test is just for cover + NullFactory.Timer("name", nil).Record(0) + NullFactory.Counter("name", nil).Inc(0) + NullFactory.Gauge("name", nil).Update(0) + NullFactory.Namespace("name", nil).Gauge("name2", nil).Update(0) +} diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go index 1db8aaa68f..4a8abdb539 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package metrics diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go index bed6d609e0..e18d222abb 100644 --- a/vendor/github.com/uber/jaeger-lib/metrics/timer.go +++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go @@ -1,22 +1,16 @@ // Copyright (c) 2017 Uber Technologies, Inc. // -// Permission is hereby granted, free of charge, to any person obtaining a copy -// of this software and associated documentation files (the "Software"), to deal -// in the Software without restriction, including without limitation the rights -// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -// copies of the Software, and to permit persons to whom the Software is -// furnished to do so, subject to the following conditions: +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at // -// The above copyright notice and this permission notice shall be included in -// all copies or substantial portions of the Software. +// http://www.apache.org/licenses/LICENSE-2.0 // -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -// THE SOFTWARE. +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package metrics diff --git a/vendor/golang.org/x/net/.gitattributes b/vendor/golang.org/x/net/.gitattributes new file mode 100644 index 0000000000..d2f212e5da --- /dev/null +++ b/vendor/golang.org/x/net/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. 
+# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/net/.gitignore b/vendor/golang.org/x/net/.gitignore new file mode 100644 index 0000000000..8339fd61d3 --- /dev/null +++ b/vendor/golang.org/x/net/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/net/AUTHORS b/vendor/golang.org/x/net/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/net/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/net/CONTRIBUTING.md b/vendor/golang.org/x/net/CONTRIBUTING.md new file mode 100644 index 0000000000..88dff59bc7 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/net/CONTRIBUTORS b/vendor/golang.org/x/net/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/net/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/net/README b/vendor/golang.org/x/net/README new file mode 100644 index 0000000000..6b13d8e505 --- /dev/null +++ b/vendor/golang.org/x/net/README @@ -0,0 +1,3 @@ +This repository holds supplementary Go networking libraries. + +To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/net/codereview.cfg b/vendor/golang.org/x/net/codereview.cfg new file mode 100644 index 0000000000..3f8b14b64e --- /dev/null +++ b/vendor/golang.org/x/net/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go new file mode 100644 index 0000000000..4209b6ffc1 --- /dev/null +++ b/vendor/golang.org/x/net/context/context_test.go @@ -0,0 +1,577 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
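+// This file is restricted by the build tag below to Go releases before 1.7,
+// where the standard library does not yet ship a context package.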
+ +// +build !go1.7 + +package context + +import ( + "fmt" + "math/rand" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +// otherContext is a Context that's not one of the types defined in context.go. +// This lets us test code paths that differ based on the underlying type of the +// Context. +type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. 
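+	// (valueChild is not cancelable, so it never appears in the parent's children map.)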
+ pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. + check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond)) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 100*time.Millisecond) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o := otherContext{c} + testDeadline(o, 200*time.Millisecond, t) + + c, _ = WithTimeout(Background(), 100*time.Millisecond) + o = otherContext{c} + c, _ = WithTimeout(o, 300*time.Millisecond) + testDeadline(c, 200*time.Millisecond, t) +} + +func TestCanceledTimeout(t *testing.T) { + c, _ := WithTimeout(Background(), 200*time.Millisecond) + o := otherContext{c} + c, cancel := WithTimeout(o, 400*time.Millisecond) + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := otherContext{WithValue(Background(), 
k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TOOD(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000000..e45feec4d3 --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,146 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "io" + "net/http" + "net/url" + "strings" + + "golang.org/x/net/context" +) + +func nop() {} + +var ( + testHookContextDoneBeforeHeaders = nop + testHookDoReturned = nop + testHookDidBodyClose = nop +) + +// Do sends an HTTP request with the provided http.Client and returns an HTTP response. +// If the client is nil, http.DefaultClient is used. +// If the context is canceled or times out, ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + + // TODO(djd): Respect any existing value of req.Cancel. 
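+	// Pre-Go-1.7 cancelation mechanism: req.Cancel is a channel that, once
+	// closed, tells the transport to abort the in-flight request. It is
+	// closed below when ctx.Done() fires.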
+ cancel := make(chan struct{}) + req.Cancel = cancel + + type responseAndError struct { + resp *http.Response + err error + } + result := make(chan responseAndError, 1) + + // Make local copies of test hooks closed over by goroutines below. + // Prevents data races in tests. + testHookDoReturned := testHookDoReturned + testHookDidBodyClose := testHookDidBodyClose + + go func() { + resp, err := client.Do(req) + testHookDoReturned() + result <- responseAndError{resp, err} + }() + + var resp *http.Response + + select { + case <-ctx.Done(): + testHookContextDoneBeforeHeaders() + close(cancel) + // Clean up after the goroutine calling client.Do: + go func() { + if r := <-result; r.resp != nil { + testHookDidBodyClose() + r.resp.Body.Close() + } + }() + return nil, ctx.Err() + case r := <-result: + var err error + resp, err = r.resp, r.err + if err != nil { + return resp, err + } + } + + c := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + close(cancel) + case <-c: + // The response's Body is closed. + } + }() + resp.Body = &notifyingReader{resp.Body, c} + + return resp, nil +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. +func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} + +// notifyingReader is an io.ReadCloser that closes the notify channel after +// Close is called or a Read fails on the underlying ReadCloser. +type notifyingReader struct { + io.ReadCloser + notify chan<- struct{} +} + +func (r *notifyingReader) Read(p []byte) (int, error) { + n, err := r.ReadCloser.Read(p) + if err != nil && r.notify != nil { + close(r.notify) + r.notify = nil + } + return n, err +} + +func (r *notifyingReader) Close() error { + err := r.ReadCloser.Close() + if r.notify != nil { + close(r.notify) + r.notify = nil + } + return err +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go new file mode 100644 index 0000000000..77c25ba7ee --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go @@ -0,0 +1,176 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +// +build !plan9 + +package ctxhttp + +import ( + "io/ioutil" + "net" + "net/http" + "net/http/httptest" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +const ( + requestDuration = 100 * time.Millisecond + requestBody = "ok" +) + +func TestNoTimeout(t *testing.T) { + ctx := context.Background() + resp, err := doRequest(ctx) + + if resp == nil || err != nil { + t.Fatalf("error received from client: %v %v", err, resp) + } +} + +func TestCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(requestDuration / 2) + cancel() + }() + + resp, err := doRequest(ctx) + + if resp != nil || err == nil { + t.Fatalf("expected error, didn't get one. resp: %v", resp) + } + if err != ctx.Err() { + t.Fatalf("expected error from context but got: %v", err) + } +} + +func TestCancelAfterRequest(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + + resp, err := doRequest(ctx) + + // Cancel before reading the body. + // Request.Body should still be readable after the context is canceled. + cancel() + + b, err := ioutil.ReadAll(resp.Body) + if err != nil || string(b) != requestBody { + t.Fatalf("could not read body: %q %v", b, err) + } +} + +func TestCancelAfterHangingRequest(t *testing.T) { + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.(http.Flusher).Flush() + <-w.(http.CloseNotifier).CloseNotify() + }) + + serv := httptest.NewServer(handler) + defer serv.Close() + + ctx, cancel := context.WithCancel(context.Background()) + resp, err := Get(ctx, nil, serv.URL) + if err != nil { + t.Fatalf("unexpected error in Get: %v", err) + } + + // Cancel before reading the body. + // Reading Request.Body should fail, since the request was + // canceled before anything was written. + cancel() + + done := make(chan struct{}) + + go func() { + b, err := ioutil.ReadAll(resp.Body) + if len(b) != 0 || err == nil { + t.Errorf(`Read got (%q, %v); want ("", error)`, b, err) + } + close(done) + }() + + select { + case <-time.After(1 * time.Second): + t.Errorf("Test timed out") + case <-done: + } +} + +func doRequest(ctx context.Context) (*http.Response, error) { + var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(requestDuration) + w.Write([]byte(requestBody)) + }) + + serv := httptest.NewServer(okHandler) + defer serv.Close() + + return Get(ctx, nil, serv.URL) +} + +// golang.org/issue/14065 +func TestClosesResponseBodyOnCancel(t *testing.T) { + defer func() { testHookContextDoneBeforeHeaders = nop }() + defer func() { testHookDoReturned = nop }() + defer func() { testHookDidBodyClose = nop }() + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})) + defer ts.Close() + + ctx, cancel := context.WithCancel(context.Background()) + + // closed when Do enters select case <-ctx.Done() + enteredDonePath := make(chan struct{}) + + testHookContextDoneBeforeHeaders = func() { + close(enteredDonePath) + } + + testHookDoReturned = func() { + // We now have the result (the Flush'd headers) at least, + // so we can cancel the request. + cancel() + + // But block the client.Do goroutine from sending + // until Do enters into the <-ctx.Done() path, since + // otherwise if both channels are readable, select + // picks a random one.
+ <-enteredDonePath + } + + sawBodyClose := make(chan struct{}) + testHookDidBodyClose = func() { close(sawBodyClose) } + + tr := &http.Transport{} + defer tr.CloseIdleConnections() + c := &http.Client{Transport: tr} + req, _ := http.NewRequest("GET", ts.URL, nil) + _, doErr := Do(ctx, c, req) + + select { + case <-sawBodyClose: + case <-time.After(5 * time.Second): + t.Fatal("timeout waiting for body to close") + } + + if doErr != ctx.Err() { + t.Errorf("Do error = %v; want %v", doErr, ctx.Err()) + } +} + +type noteCloseConn struct { + net.Conn + onceClose sync.Once + closefn func() +} + +func (c *noteCloseConn) Close() error { + c.onceClose.Do(c.closefn) + return c.Conn.Close() +} diff --git a/vendor/golang.org/x/net/context/withtimeout_test.go b/vendor/golang.org/x/net/context/withtimeout_test.go new file mode 100644 index 0000000000..a6754dc368 --- /dev/null +++ b/vendor/golang.org/x/net/context/withtimeout_test.go @@ -0,0 +1,26 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package context_test + +import ( + "fmt" + "time" + + "golang.org/x/net/context" +) + +func ExampleWithTimeout() { + // Pass a context with a timeout to tell a blocking function that it + // should abandon its work after the timeout elapses. + ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) + select { + case <-time.After(200 * time.Millisecond): + fmt.Println("overslept") + case <-ctx.Done(): + fmt.Println(ctx.Err()) // prints "context deadline exceeded" + } + // Output: + // context deadline exceeded +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000000..4c5ad88b1e --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000000..f540b196f7 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the hostname +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. 
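+//
+// A minimal usage sketch (addresses and names below are illustrative):
+//
+//	socks, _ := proxy.SOCKS5("tcp", "10.0.0.1:1080", nil, proxy.Direct)
+//	p := proxy.NewPerHost(socks, proxy.Direct)
+//	p.AddFromString("localhost,*.internal.example.com,10.0.0.0/8")
+//	conn, err := p.Dial("tcp", "db.internal.example.com:5432") // matches a zone, so bypasses the proxy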
+func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone "example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a hostname +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a hostname that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/per_host_test.go b/vendor/golang.org/x/net/proxy/per_host_test.go new file mode 100644 index 0000000000..a7d8095711 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host_test.go @@ -0,0 +1,55 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proxy + +import ( + "errors" + "net" + "reflect" + "testing" +) + +type recordingProxy struct { + addrs []string +} + +func (r *recordingProxy) Dial(network, addr string) (net.Conn, error) { + r.addrs = append(r.addrs, addr) + return nil, errors.New("recordingProxy") +} + +func TestPerHost(t *testing.T) { + var def, bypass recordingProxy + perHost := NewPerHost(&def, &bypass) + perHost.AddFromString("localhost,*.zone,127.0.0.1,10.0.0.1/8,1000::/16") + + expectedDef := []string{ + "example.com:123", + "1.2.3.4:123", + "[1001::]:123", + } + expectedBypass := []string{ + "localhost:123", + "zone:123", + "foo.zone:123", + "127.0.0.1:123", + "10.1.2.3:123", + "[1000::]:123", + } + + for _, addr := range expectedDef { + perHost.Dial("tcp", addr) + } + for _, addr := range expectedBypass { + perHost.Dial("tcp", addr) + } + + if !reflect.DeepEqual(expectedDef, def.addrs) { + t.Errorf("Hosts which went to the default proxy didn't match. Got %v, want %v", def.addrs, expectedDef) + } + if !reflect.DeepEqual(expectedBypass, bypass.addrs) { + t.Errorf("Hosts which went to the bypass proxy didn't match. Got %v, want %v", bypass.addrs, expectedBypass) + } +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 0000000000..78a8b7bee9 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,94 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := os.Getenv("no_proxy") + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
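+//
+// For example (the URL is illustrative; only the "socks5" scheme is built
+// in, anything else must first be registered via RegisterDialerType):
+//
+//	u, _ := url.Parse("socks5://user:password@198.51.100.1:1080")
+//	dialer, err := proxy.FromURL(u, proxy.Direct)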
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} diff --git a/vendor/golang.org/x/net/proxy/proxy_test.go b/vendor/golang.org/x/net/proxy/proxy_test.go new file mode 100644 index 0000000000..c19a5c0635 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy_test.go @@ -0,0 +1,142 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "io" + "net" + "net/url" + "strconv" + "sync" + "testing" +) + +func TestFromURL(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5Domain, &wg) + + url, err := url.Parse("socks5://user:password@" + gateway.Addr().String()) + if err != nil { + t.Fatalf("url.Parse failed: %v", err) + } + proxy, err := FromURL(url, Direct) + if err != nil { + t.Fatalf("FromURL failed: %v", err) + } + _, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Fatalf("net.SplitHostPort failed: %v", err) + } + if c, err := proxy.Dial("tcp", "localhost:"+port); err != nil { + t.Fatalf("FromURL.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func TestSOCKS5(t *testing.T) { + endSystem, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer endSystem.Close() + gateway, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + t.Fatalf("net.Listen failed: %v", err) + } + defer gateway.Close() + + var wg sync.WaitGroup + wg.Add(1) + go socks5Gateway(t, gateway, endSystem, socks5IP4, &wg) + + proxy, err := SOCKS5("tcp", gateway.Addr().String(), nil, Direct) + if err != nil { + t.Fatalf("SOCKS5 failed: %v", err) + } + if c, err := proxy.Dial("tcp", endSystem.Addr().String()); err != nil { + t.Fatalf("SOCKS5.Dial failed: %v", err) + } else { + c.Close() + } + + wg.Wait() +} + +func socks5Gateway(t *testing.T, gateway, endSystem net.Listener, typ byte, wg *sync.WaitGroup) { + defer wg.Done() + + c, err := gateway.Accept() + if err != nil { + t.Errorf("net.Listener.Accept failed: %v", err) + return + } + defer c.Close() + + b := make([]byte, 32) + var n int + if typ == socks5Domain { + n = 4 + } else { + n = 3 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if _, err := c.Write([]byte{socks5Version, socks5AuthNone}); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } + if typ == socks5Domain { + n = 16 + } else { + n = 10 + } + if _, err := io.ReadFull(c, b[:n]); err != nil { + t.Errorf("io.ReadFull failed: %v", err) + return + } + if b[0] != socks5Version || b[1] != socks5Connect || b[2] != 0x00 || b[3] != typ { + 
t.Errorf("got an unexpected packet: %#02x %#02x %#02x %#02x", b[0], b[1], b[2], b[3]) + return + } + if typ == socks5Domain { + copy(b[:5], []byte{socks5Version, 0x00, 0x00, socks5Domain, 9}) + b = append(b, []byte("localhost")...) + } else { + copy(b[:4], []byte{socks5Version, 0x00, 0x00, socks5IP4}) + } + host, port, err := net.SplitHostPort(endSystem.Addr().String()) + if err != nil { + t.Errorf("net.SplitHostPort failed: %v", err) + return + } + b = append(b, []byte(net.ParseIP(host).To4())...) + p, err := strconv.Atoi(port) + if err != nil { + t.Errorf("strconv.Atoi failed: %v", err) + return + } + b = append(b, []byte{byte(p >> 8), byte(p)}...) + if _, err := c.Write(b); err != nil { + t.Errorf("net.Conn.Write failed: %v", err) + return + } +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000000..9b9628239a --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,210 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928. +func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the network net via the SOCKS5 proxy. 
+func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + closeConn := &conn + defer func() { + if closeConn != nil { + (*closeConn).Close() + } + }() + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return nil, errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return nil, errors.New("proxy: destination hostname too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + closeConn = nil + return conn, nil +} diff --git a/vendor/golang.org/x/sys/.gitattributes b/vendor/golang.org/x/sys/.gitattributes new file mode 100644 index 0000000000..d2f212e5da --- /dev/null +++ b/vendor/golang.org/x/sys/.gitattributes @@ -0,0 +1,10 @@ +# Treat all files in this repo as binary, with no git magic updating +# line endings. Windows users contributing to Go will need to use a +# modern version of git and editors capable of LF line endings. +# +# We'll prevent accidental CRLF line endings from entering the repo +# via the git-review gofmt checks. +# +# See golang.org/issue/9281 + +* -text diff --git a/vendor/golang.org/x/sys/.gitignore b/vendor/golang.org/x/sys/.gitignore new file mode 100644 index 0000000000..8339fd61d3 --- /dev/null +++ b/vendor/golang.org/x/sys/.gitignore @@ -0,0 +1,2 @@ +# Add no patterns to .hgignore except for files generated by the build. +last-change diff --git a/vendor/golang.org/x/sys/AUTHORS b/vendor/golang.org/x/sys/AUTHORS new file mode 100644 index 0000000000..15167cd746 --- /dev/null +++ b/vendor/golang.org/x/sys/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/sys/CONTRIBUTING.md b/vendor/golang.org/x/sys/CONTRIBUTING.md new file mode 100644 index 0000000000..88dff59bc7 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + + +## Filing issues + +When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. 
What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +**We do not accept GitHub pull requests** +(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. + diff --git a/vendor/golang.org/x/sys/CONTRIBUTORS b/vendor/golang.org/x/sys/CONTRIBUTORS new file mode 100644 index 0000000000..1c4577e968 --- /dev/null +++ b/vendor/golang.org/x/sys/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/codeskyblue/go-uuid/LICENSE b/vendor/golang.org/x/sys/LICENSE similarity index 96% rename from vendor/github.com/codeskyblue/go-uuid/LICENSE rename to vendor/golang.org/x/sys/LICENSE index ab6b011a10..6a66aea5ea 100644 --- a/vendor/github.com/codeskyblue/go-uuid/LICENSE +++ b/vendor/golang.org/x/sys/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2009 Google Inc. All rights reserved. +Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are diff --git a/vendor/golang.org/x/sys/PATENTS b/vendor/golang.org/x/sys/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/golang.org/x/sys/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md new file mode 100644 index 0000000000..ef6c9e59c2 --- /dev/null +++ b/vendor/golang.org/x/sys/README.md @@ -0,0 +1,18 @@ +# sys + +This repository holds supplemental Go packages for low-level interactions with +the operating system. 
+ +## Download/Install + +The easiest way to install is to run `go get -u golang.org/x/sys`. You can +also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the sys repository is located at +https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the +subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sys/codereview.cfg b/vendor/golang.org/x/sys/codereview.cfg new file mode 100644 index 0000000000..3f8b14b64e --- /dev/null +++ b/vendor/golang.org/x/sys/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s new file mode 100644 index 0000000000..1c20dd2f89 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_386.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for 386, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-8 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-4 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s new file mode 100644 index 0000000000..4d025ab556 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s @@ -0,0 +1,13 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc +// + +TEXT ·getprocaddress(SB), 7, $0-32 + JMP syscall·getprocaddress(SB) + +TEXT ·loadlibrary(SB), 7, $0-8 + JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go new file mode 100644 index 0000000000..e92c05b213 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -0,0 +1,378 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import ( + "sync" + "sync/atomic" + "syscall" + "unsafe" +) + +// DLLError describes reasons for DLL load failures. +type DLLError struct { + Err error + ObjName string + Msg string +} + +func (e *DLLError) Error() string { return e.Msg } + +// Implemented in runtime/syscall_windows.goc; we provide jumps to them in our assembly file. +func loadlibrary(filename *uint16) (handle uintptr, err syscall.Errno) +func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err syscall.Errno) + +// A DLL implements access to a single DLL. +type DLL struct { + Name string + Handle Handle +} + +// LoadDLL loads DLL file into memory. +// +// Warning: using LoadDLL without an absolute path name is subject to +// DLL preloading attacks. To safely load a system DLL, use LazyDLL +// with System set to true, or use LoadLibraryEx directly. 
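+//
+// A sketch of the safer pattern (DLL and procedure names are illustrative):
+//
+//	k32 := windows.NewLazySystemDLL("kernel32.dll")
+//	proc := k32.NewProc("GetTickCount")
+//	r1, _, _ := proc.Call()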
+func LoadDLL(name string) (dll *DLL, err error) { + namep, err := UTF16PtrFromString(name) + if err != nil { + return nil, err + } + h, e := loadlibrary(namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to load " + name + ": " + e.Error(), + } + } + d := &DLL{ + Name: name, + Handle: Handle(h), + } + return d, nil +} + +// MustLoadDLL is like LoadDLL but panics if the load operation fails. +func MustLoadDLL(name string) *DLL { + d, e := LoadDLL(name) + if e != nil { + panic(e) + } + return d +} + +// FindProc searches DLL d for procedure named name and returns *Proc +// if found. It returns an error if search fails. +func (d *DLL) FindProc(name string) (proc *Proc, err error) { + namep, err := BytePtrFromString(name) + if err != nil { + return nil, err + } + a, e := getprocaddress(uintptr(d.Handle), namep) + if e != 0 { + return nil, &DLLError{ + Err: e, + ObjName: name, + Msg: "Failed to find " + name + " procedure in " + d.Name + ": " + e.Error(), + } + } + p := &Proc{ + Dll: d, + Name: name, + addr: a, + } + return p, nil +} + +// MustFindProc is like FindProc but panics if search fails. +func (d *DLL) MustFindProc(name string) *Proc { + p, e := d.FindProc(name) + if e != nil { + panic(e) + } + return p +} + +// Release unloads DLL d from memory. +func (d *DLL) Release() (err error) { + return FreeLibrary(d.Handle) +} + +// A Proc implements access to a procedure inside a DLL. +type Proc struct { + Dll *DLL + Name string + addr uintptr +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +func (p *Proc) Addr() uintptr { + return p.addr +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic if more than 15 arguments +// are supplied. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno.
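+//
+// A typical call site checks the primary result before the error (names and
+// the failure convention below are illustrative; many Win32 APIs signal
+// failure with a zero return):
+//
+//	r1, _, err := proc.Call(uintptr(handle), uintptr(flags))
+//	if r1 == 0 {
+//		return err // err wraps the GetLastError value
+//	}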
+func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + switch len(a) { + case 0: + return syscall.Syscall(p.Addr(), uintptr(len(a)), 0, 0, 0) + case 1: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], 0, 0) + case 2: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], 0) + case 3: + return syscall.Syscall(p.Addr(), uintptr(len(a)), a[0], a[1], a[2]) + case 4: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], 0, 0) + case 5: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], 0) + case 6: + return syscall.Syscall6(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5]) + case 7: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], 0, 0) + case 8: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], 0) + case 9: + return syscall.Syscall9(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8]) + case 10: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], 0, 0) + case 11: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], 0) + case 12: + return syscall.Syscall12(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11]) + case 13: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], 0, 0) + case 14: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], 0) + case 15: + return syscall.Syscall15(p.Addr(), uintptr(len(a)), a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8], a[9], a[10], a[11], a[12], a[13], a[14]) + default: + panic("Call " + p.Name + " with too many arguments " + itoa(len(a)) + ".") + } +} + +// A LazyDLL implements access to a single DLL. +// It will delay the load of the DLL until the first +// call to its Handle method or to one of its +// LazyProc's Addr method. +type LazyDLL struct { + Name string + + // System determines whether the DLL must be loaded from the + // Windows System directory, bypassing the normal DLL search + // path. + System bool + + mu sync.Mutex + dll *DLL // non nil once DLL is loaded +} + +// Load loads DLL file d.Name into memory. It returns an error if it fails. +// Load will not try to load the DLL if it is already loaded into memory. +func (d *LazyDLL) Load() error { + // Non-racy version of: + // if d.dll != nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll))) != nil { + return nil + } + d.mu.Lock() + defer d.mu.Unlock() + if d.dll != nil { + return nil + } + + // kernel32.dll is special, since it's where LoadLibraryEx comes from. + // The kernel already special-cases its name, so it's always + // loaded from system32. + var dll *DLL + var err error + if d.Name == "kernel32.dll" { + dll, err = LoadDLL(d.Name) + } else { + dll, err = loadLibraryEx(d.Name, d.System) + } + if err != nil { + return err + } + + // Non-racy version of: + // d.dll = dll + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&d.dll)), unsafe.Pointer(dll)) + return nil +} + +// mustLoad is like Load but panics if search fails. +func (d *LazyDLL) mustLoad() { + e := d.Load() + if e != nil { + panic(e) + } +} + +// Handle returns d's module handle.
+func (d *LazyDLL) Handle() uintptr { + d.mustLoad() + return uintptr(d.dll.Handle) +} + +// NewProc returns a LazyProc for accessing the named procedure in the DLL d. +func (d *LazyDLL) NewProc(name string) *LazyProc { + return &LazyProc{l: d, Name: name} +} + +// NewLazyDLL creates a new LazyDLL associated with a DLL file. +func NewLazyDLL(name string) *LazyDLL { + return &LazyDLL{Name: name} +} + +// NewLazySystemDLL is like NewLazyDLL, but will only +// search Windows System directory for the DLL if name is +// a base name (like "advapi32.dll"). +func NewLazySystemDLL(name string) *LazyDLL { + return &LazyDLL{Name: name, System: true} +} + +// A LazyProc implements access to a procedure inside a LazyDLL. +// It delays the lookup until the Addr method is called. +type LazyProc struct { + Name string + + mu sync.Mutex + l *LazyDLL + proc *Proc +} + +// Find searches the DLL for a procedure named p.Name. It returns +// an error if the search fails. Find will not repeat the search +// if the procedure has already been found and loaded into memory. +func (p *LazyProc) Find() error { + // Non-racy version of: + // if p.proc == nil { + if atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc))) == nil { + p.mu.Lock() + defer p.mu.Unlock() + if p.proc == nil { + e := p.l.Load() + if e != nil { + return e + } + proc, e := p.l.dll.FindProc(p.Name) + if e != nil { + return e + } + // Non-racy version of: + // p.proc = proc + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&p.proc)), unsafe.Pointer(proc)) + } + } + return nil +} + +// mustFind is like Find but panics if search fails. +func (p *LazyProc) mustFind() { + e := p.Find() + if e != nil { + panic(e) + } +} + +// Addr returns the address of the procedure represented by p. +// The return value can be passed to Syscall to run the procedure. +// It will panic if the procedure cannot be found. +func (p *LazyProc) Addr() uintptr { + p.mustFind() + return p.proc.Addr() +} + +//go:uintptrescapes + +// Call executes procedure p with arguments a. It will panic if more than 15 arguments +// are supplied. It will also panic if the procedure cannot be found. +// +// The returned error is always non-nil, constructed from the result of GetLastError. +// Callers must inspect the primary return value to decide whether an error occurred +// (according to the semantics of the specific function being called) before consulting +// the error. The error will be guaranteed to contain windows.Errno. +func (p *LazyProc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error) { + p.mustFind() + return p.proc.Call(a...) +} + +var canDoSearchSystem32Once struct { + sync.Once + v bool +} + +func initCanDoSearchSystem32() { + // https://msdn.microsoft.com/en-us/library/ms684179(v=vs.85).aspx says: + // "Windows 7, Windows Server 2008 R2, Windows Vista, and Windows + // Server 2008: The LOAD_LIBRARY_SEARCH_* flags are available on + // systems that have KB2533623 installed. To determine whether the + // flags are available, use GetProcAddress to get the address of the + // AddDllDirectory, RemoveDllDirectory, or SetDefaultDllDirectories + // function. If GetProcAddress succeeds, the LOAD_LIBRARY_SEARCH_* + // flags can be used with LoadLibraryEx."
+ canDoSearchSystem32Once.v = (modkernel32.NewProc("AddDllDirectory").Find() == nil) +} + +func canDoSearchSystem32() bool { + canDoSearchSystem32Once.Do(initCanDoSearchSystem32) + return canDoSearchSystem32Once.v +} + +func isBaseName(name string) bool { + for _, c := range name { + if c == ':' || c == '/' || c == '\\' { + return false + } + } + return true +} + +// loadLibraryEx wraps the Windows LoadLibraryEx function. +// +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684179(v=vs.85).aspx +// +// If name is not an absolute path, LoadLibraryEx searches for the DLL +// in a variety of automatic locations unless constrained by flags. +// See: https://msdn.microsoft.com/en-us/library/ff919712%28VS.85%29.aspx +func loadLibraryEx(name string, system bool) (*DLL, error) { + loadDLL := name + var flags uintptr + if system { + if canDoSearchSystem32() { + const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 + flags = LOAD_LIBRARY_SEARCH_SYSTEM32 + } else if isBaseName(name) { + // WindowsXP or unpatched Windows machine + // trying to load "foo.dll" out of the system + // folder, but LoadLibraryEx doesn't support + // that yet on their system, so emulate it. + windir, _ := Getenv("WINDIR") // old var; apparently works on XP + if windir == "" { + return nil, errString("%WINDIR% not defined") + } + loadDLL = windir + "\\System32\\" + name + } + } + h, err := LoadLibraryEx(loadDLL, 0, flags) + if err != nil { + return nil, err + } + return &DLL{Name: name, Handle: h}, nil +} + +type errString string + +func (s errString) Error() string { return string(s) } diff --git a/vendor/golang.org/x/sys/windows/env_unset.go b/vendor/golang.org/x/sys/windows/env_unset.go new file mode 100644 index 0000000000..b712c6604a --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_unset.go @@ -0,0 +1,15 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows +// +build go1.4 + +package windows + +import "syscall" + +func Unsetenv(key string) error { + // This was added in Go 1.4. + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go new file mode 100644 index 0000000000..e8292386c0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/env_windows.go @@ -0,0 +1,25 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows environment variables. + +package windows + +import "syscall" + +func Getenv(key string) (value string, found bool) { + return syscall.Getenv(key) +} + +func Setenv(key, value string) error { + return syscall.Setenv(key, value) +} + +func Clearenv() { + syscall.Clearenv() +} + +func Environ() []string { + return syscall.Environ() +} diff --git a/vendor/golang.org/x/sys/windows/eventlog.go b/vendor/golang.org/x/sys/windows/eventlog.go new file mode 100644 index 0000000000..40af946e16 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/eventlog.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
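+
+// A minimal usage sketch of the lazy-loading API above, assuming this
+// package is imported as windows along with unsafe; the messageBox
+// wrapper and its argument handling are illustrative only, not part of
+// this package:
+//
+//	var (
+//		moduser32      = windows.NewLazySystemDLL("user32.dll")
+//		procMessageBox = moduser32.NewProc("MessageBoxW")
+//	)
+//
+//	// messageBox calls user32.MessageBoxW; user32.dll is loaded and the
+//	// procedure resolved on the first Call.
+//	func messageBox(text, caption string) (int32, error) {
+//		t, err := windows.UTF16PtrFromString(text)
+//		if err != nil {
+//			return 0, err
+//		}
+//		c, err := windows.UTF16PtrFromString(caption)
+//		if err != nil {
+//			return 0, err
+//		}
+//		r1, _, callErr := procMessageBox.Call(0,
+//			uintptr(unsafe.Pointer(t)), uintptr(unsafe.Pointer(c)), 0)
+//		if r1 == 0 {
+//			return 0, callErr // 0 indicates failure for MessageBoxW
+//		}
+//		return int32(r1), nil
+//	}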
+ +// +build windows + +package windows + +const ( + EVENTLOG_SUCCESS = 0 + EVENTLOG_ERROR_TYPE = 1 + EVENTLOG_WARNING_TYPE = 2 + EVENTLOG_INFORMATION_TYPE = 4 + EVENTLOG_AUDIT_SUCCESS = 8 + EVENTLOG_AUDIT_FAILURE = 16 +) + +//sys RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) [failretval==0] = advapi32.RegisterEventSourceW +//sys DeregisterEventSource(handle Handle) (err error) = advapi32.DeregisterEventSource +//sys ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) = advapi32.ReportEventW diff --git a/vendor/golang.org/x/sys/windows/exec_windows.go b/vendor/golang.org/x/sys/windows/exec_windows.go new file mode 100644 index 0000000000..3606c3a8b3 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/exec_windows.go @@ -0,0 +1,97 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fork, exec, wait, etc. + +package windows + +// EscapeArg rewrites command line argument s as prescribed +// in http://msdn.microsoft.com/en-us/library/ms880421. +// This function returns "" (2 double quotes) if s is empty. +// Alternatively, these transformations are done: +// - every back slash (\) is doubled, but only if immediately +// followed by double quote ("); +// - every double quote (") is escaped by back slash (\); +// - finally, s is wrapped with double quotes (arg -> "arg"), +// but only if there is space or tab inside s. +func EscapeArg(s string) string { + if len(s) == 0 { + return "\"\"" + } + n := len(s) + hasSpace := false + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '\\': + n++ + case ' ', '\t': + hasSpace = true + } + } + if hasSpace { + n += 2 + } + if n == len(s) { + return s + } + + qs := make([]byte, n) + j := 0 + if hasSpace { + qs[j] = '"' + j++ + } + slashes := 0 + for i := 0; i < len(s); i++ { + switch s[i] { + default: + slashes = 0 + qs[j] = s[i] + case '\\': + slashes++ + qs[j] = s[i] + case '"': + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '\\' + j++ + qs[j] = s[i] + } + j++ + } + if hasSpace { + for ; slashes > 0; slashes-- { + qs[j] = '\\' + j++ + } + qs[j] = '"' + j++ + } + return string(qs[:j]) +} + +func CloseOnExec(fd Handle) { + SetHandleInformation(Handle(fd), HANDLE_FLAG_INHERIT, 0) +} + +// FullPath retrieves the full path of the specified file. +func FullPath(name string) (path string, err error) { + p, err := UTF16PtrFromString(name) + if err != nil { + return "", err + } + n := uint32(100) + for { + buf := make([]uint16, n) + n, err = GetFullPathName(p, uint32(len(buf)), &buf[0], nil) + if err != nil { + return "", err + } + if n <= uint32(len(buf)) { + return UTF16ToString(buf[:n]), nil + } + } +} diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go new file mode 100644 index 0000000000..f80a4204f0 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/memory_windows.go @@ -0,0 +1,26 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
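+
+// Illustrative inputs and outputs for EscapeArg above, derived from the
+// quoting rules documented on it (a sketch, not an exhaustive list):
+//
+//	windows.EscapeArg(``)        // `""`     (empty arg becomes two quotes)
+//	windows.EscapeArg(`a b`)     // `"a b"`  (space forces surrounding quotes)
+//	windows.EscapeArg(`a"b`)     // `a\"b`   (quote escaped with backslash)
+//	windows.EscapeArg(`a\"b`)    // `a\\\"b` (backslash doubled before a quote)
+//	windows.EscapeArg(`C:\dir\`) // `C:\dir\` (unchanged: no quote follows the backslashes)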
+ +package windows + +const ( + MEM_COMMIT = 0x00001000 + MEM_RESERVE = 0x00002000 + MEM_DECOMMIT = 0x00004000 + MEM_RELEASE = 0x00008000 + MEM_RESET = 0x00080000 + MEM_TOP_DOWN = 0x00100000 + MEM_WRITE_WATCH = 0x00200000 + MEM_PHYSICAL = 0x00400000 + MEM_RESET_UNDO = 0x01000000 + MEM_LARGE_PAGES = 0x20000000 + + PAGE_NOACCESS = 0x01 + PAGE_READONLY = 0x02 + PAGE_READWRITE = 0x04 + PAGE_WRITECOPY = 0x08 + PAGE_EXECUTE_READ = 0x20 + PAGE_EXECUTE_READWRITE = 0x40 + PAGE_EXECUTE_WRITECOPY = 0x80 +) diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go new file mode 100644 index 0000000000..fb7db0ef8d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -0,0 +1,7 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go new file mode 100644 index 0000000000..a74e3e24b5 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race.go @@ -0,0 +1,30 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,race + +package windows + +import ( + "runtime" + "unsafe" +) + +const raceenabled = true + +func raceAcquire(addr unsafe.Pointer) { + runtime.RaceAcquire(addr) +} + +func raceReleaseMerge(addr unsafe.Pointer) { + runtime.RaceReleaseMerge(addr) +} + +func raceReadRange(addr unsafe.Pointer, len int) { + runtime.RaceReadRange(addr, len) +} + +func raceWriteRange(addr unsafe.Pointer, len int) { + runtime.RaceWriteRange(addr, len) +} diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go new file mode 100644 index 0000000000..e44a3cbf67 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/race0.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!race + +package windows + +import ( + "unsafe" +) + +const raceenabled = false + +func raceAcquire(addr unsafe.Pointer) { +} + +func raceReleaseMerge(addr unsafe.Pointer) { +} + +func raceReadRange(addr unsafe.Pointer, len int) { +} + +func raceWriteRange(addr unsafe.Pointer, len int) { +} diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go new file mode 100644 index 0000000000..f1ec5dc4ee --- /dev/null +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -0,0 +1,476 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
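+
+// A sketch of how the MEM_* and PAGE_* flags above are typically combined;
+// VirtualAlloc and VirtualFree themselves are declared further down, in
+// syscall_windows.go:
+//
+//	// Reserve and commit 1 MiB of read-write memory.
+//	addr, err := windows.VirtualAlloc(0, 1<<20,
+//		windows.MEM_COMMIT|windows.MEM_RESERVE, windows.PAGE_READWRITE)
+//	if err != nil {
+//		return err
+//	}
+//	// MEM_RELEASE requires size 0: the whole reservation is freed.
+//	defer windows.VirtualFree(addr, 0, windows.MEM_RELEASE)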
+ +package windows + +import ( + "syscall" + "unsafe" +) + +const ( + STANDARD_RIGHTS_REQUIRED = 0xf0000 + STANDARD_RIGHTS_READ = 0x20000 + STANDARD_RIGHTS_WRITE = 0x20000 + STANDARD_RIGHTS_EXECUTE = 0x20000 + STANDARD_RIGHTS_ALL = 0x1F0000 +) + +const ( + NameUnknown = 0 + NameFullyQualifiedDN = 1 + NameSamCompatible = 2 + NameDisplay = 3 + NameUniqueId = 6 + NameCanonical = 7 + NameUserPrincipal = 8 + NameCanonicalEx = 9 + NameServicePrincipal = 10 + NameDnsDomain = 12 +) + +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +// http://blogs.msdn.com/b/drnick/archive/2007/12/19/windows-and-upn-format-credentials.aspx +//sys TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.TranslateNameW +//sys GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) [failretval&0xff==0] = secur32.GetUserNameExW + +// TranslateAccountName converts a directory service +// object name from one format to another. +func TranslateAccountName(username string, from, to uint32, initSize int) (string, error) { + u, e := UTF16PtrFromString(username) + if e != nil { + return "", e + } + n := uint32(50) + for { + b := make([]uint16, n) + e = TranslateName(u, from, to, &b[0], &n) + if e == nil { + return UTF16ToString(b[:n]), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +const ( + // do not reorder + NetSetupUnknownStatus = iota + NetSetupUnjoined + NetSetupWorkgroupName + NetSetupDomainName +) + +type UserInfo10 struct { + Name *uint16 + Comment *uint16 + UsrComment *uint16 + FullName *uint16 +} + +//sys NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) = netapi32.NetUserGetInfo +//sys NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) = netapi32.NetGetJoinInformation +//sys NetApiBufferFree(buf *byte) (neterr error) = netapi32.NetApiBufferFree + +const ( + // do not reorder + SidTypeUser = 1 + iota + SidTypeGroup + SidTypeDomain + SidTypeAlias + SidTypeWellKnownGroup + SidTypeDeletedAccount + SidTypeInvalid + SidTypeUnknown + SidTypeComputer + SidTypeLabel +) + +type SidIdentifierAuthority struct { + Value [6]byte +} + +var ( + SECURITY_NULL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 0}} + SECURITY_WORLD_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 1}} + SECURITY_LOCAL_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 2}} + SECURITY_CREATOR_SID_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 3}} + SECURITY_NON_UNIQUE_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 4}} + SECURITY_NT_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 5}} + SECURITY_MANDATORY_LABEL_AUTHORITY = SidIdentifierAuthority{[6]byte{0, 0, 0, 0, 0, 16}} +) + +const ( + SECURITY_NULL_RID = 0 + SECURITY_WORLD_RID = 0 + SECURITY_LOCAL_RID = 0 + SECURITY_CREATOR_OWNER_RID = 0 + SECURITY_CREATOR_GROUP_RID = 1 + SECURITY_DIALUP_RID = 1 + SECURITY_NETWORK_RID = 2 + SECURITY_BATCH_RID = 3 + SECURITY_INTERACTIVE_RID = 4 + SECURITY_LOGON_IDS_RID = 5 + SECURITY_SERVICE_RID = 6 + SECURITY_LOCAL_SYSTEM_RID = 18 + SECURITY_BUILTIN_DOMAIN_RID = 32 + SECURITY_PRINCIPAL_SELF_RID = 10 + SECURITY_CREATOR_OWNER_SERVER_RID = 0x2 + SECURITY_CREATOR_GROUP_SERVER_RID = 0x3 + SECURITY_LOGON_IDS_RID_COUNT = 0x3 + SECURITY_ANONYMOUS_LOGON_RID = 0x7 + SECURITY_PROXY_RID = 0x8 + 
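+	// The relative identifier (RID) is the final component of a SID: an
+	// identifier-authority value above plus one of these RIDs names a
+	// well-known principal, e.g. SECURITY_NT_AUTHORITY with
+	// SECURITY_LOCAL_SYSTEM_RID (18) is S-1-5-18, the LocalSystem account.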
+	SECURITY_ENTERPRISE_CONTROLLERS_RID = 0x9
+	SECURITY_SERVER_LOGON_RID           = SECURITY_ENTERPRISE_CONTROLLERS_RID
+	SECURITY_AUTHENTICATED_USER_RID     = 0xb
+	SECURITY_RESTRICTED_CODE_RID        = 0xc
+	SECURITY_NT_NON_UNIQUE_RID          = 0x15
+)
+
+// Predefined domain-relative RIDs for local groups.
+// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa379649(v=vs.85).aspx
+const (
+	DOMAIN_ALIAS_RID_ADMINS                         = 0x220
+	DOMAIN_ALIAS_RID_USERS                          = 0x221
+	DOMAIN_ALIAS_RID_GUESTS                         = 0x222
+	DOMAIN_ALIAS_RID_POWER_USERS                    = 0x223
+	DOMAIN_ALIAS_RID_ACCOUNT_OPS                    = 0x224
+	DOMAIN_ALIAS_RID_SYSTEM_OPS                     = 0x225
+	DOMAIN_ALIAS_RID_PRINT_OPS                      = 0x226
+	DOMAIN_ALIAS_RID_BACKUP_OPS                     = 0x227
+	DOMAIN_ALIAS_RID_REPLICATOR                     = 0x228
+	DOMAIN_ALIAS_RID_RAS_SERVERS                    = 0x229
+	DOMAIN_ALIAS_RID_PREW2KCOMPACCESS               = 0x22a
+	DOMAIN_ALIAS_RID_REMOTE_DESKTOP_USERS           = 0x22b
+	DOMAIN_ALIAS_RID_NETWORK_CONFIGURATION_OPS      = 0x22c
+	DOMAIN_ALIAS_RID_INCOMING_FOREST_TRUST_BUILDERS = 0x22d
+	DOMAIN_ALIAS_RID_MONITORING_USERS               = 0x22e
+	DOMAIN_ALIAS_RID_LOGGING_USERS                  = 0x22f
+	DOMAIN_ALIAS_RID_AUTHORIZATIONACCESS            = 0x230
+	DOMAIN_ALIAS_RID_TS_LICENSE_SERVERS             = 0x231
+	DOMAIN_ALIAS_RID_DCOM_USERS                     = 0x232
+	DOMAIN_ALIAS_RID_IUSERS                         = 0x238
+	DOMAIN_ALIAS_RID_CRYPTO_OPERATORS               = 0x239
+	DOMAIN_ALIAS_RID_CACHEABLE_PRINCIPALS_GROUP     = 0x23b
+	DOMAIN_ALIAS_RID_NON_CACHEABLE_PRINCIPALS_GROUP = 0x23c
+	DOMAIN_ALIAS_RID_EVENT_LOG_READERS_GROUP        = 0x23d
+	DOMAIN_ALIAS_RID_CERTSVC_DCOM_ACCESS_GROUP      = 0x23e
+)
+
+//sys LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountSidW
+//sys LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) = advapi32.LookupAccountNameW
+//sys ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) = advapi32.ConvertSidToStringSidW
+//sys ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) = advapi32.ConvertStringSidToSidW
+//sys GetLengthSid(sid *SID) (len uint32) = advapi32.GetLengthSid
+//sys CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) = advapi32.CopySid
+//sys AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) = advapi32.AllocateAndInitializeSid
+//sys FreeSid(sid *SID) (err error) [failretval!=0] = advapi32.FreeSid
+//sys EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) = advapi32.EqualSid
+
+// The security identifier (SID) structure is a variable-length
+// structure used to uniquely identify users or groups.
+type SID struct{}
+
+// StringToSid converts a string-format security identifier
+// sid into a valid, functional sid.
+func StringToSid(s string) (*SID, error) {
+	var sid *SID
+	p, e := UTF16PtrFromString(s)
+	if e != nil {
+		return nil, e
+	}
+	e = ConvertStringSidToSid(p, &sid)
+	if e != nil {
+		return nil, e
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(sid)))
+	return sid.Copy()
+}
+
+// LookupSID retrieves a security identifier sid for the account
+// and the name of the domain on which the account was found.
+// If system is not empty, it specifies the target computer to search.
+func LookupSID(system, account string) (sid *SID, domain string, accType uint32, err error) {
+	if len(account) == 0 {
+		return nil, "", 0, syscall.EINVAL
+	}
+	acc, e := UTF16PtrFromString(account)
+	if e != nil {
+		return nil, "", 0, e
+	}
+	var sys *uint16
+	if len(system) > 0 {
+		sys, e = UTF16PtrFromString(system)
+		if e != nil {
+			return nil, "", 0, e
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]byte, n)
+		db := make([]uint16, dn)
+		sid = (*SID)(unsafe.Pointer(&b[0]))
+		e = LookupAccountName(sys, acc, sid, &n, &db[0], &dn, &accType)
+		if e == nil {
+			return sid, UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, "", 0, e
+		}
+	}
+}
+
+// String converts sid to a string format
+// suitable for display, storage, or transmission.
+func (sid *SID) String() (string, error) {
+	var s *uint16
+	e := ConvertSidToStringSid(sid, &s)
+	if e != nil {
+		return "", e
+	}
+	defer LocalFree((Handle)(unsafe.Pointer(s)))
+	return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil
+}
+
+// Len returns the length, in bytes, of a valid security identifier sid.
+func (sid *SID) Len() int {
+	return int(GetLengthSid(sid))
+}
+
+// Copy creates a duplicate of security identifier sid.
+func (sid *SID) Copy() (*SID, error) {
+	b := make([]byte, sid.Len())
+	sid2 := (*SID)(unsafe.Pointer(&b[0]))
+	e := CopySid(uint32(len(b)), sid2, sid)
+	if e != nil {
+		return nil, e
+	}
+	return sid2, nil
+}
+
+// LookupAccount retrieves the name of the account for this sid
+// and the name of the first domain on which this sid is found.
+// If system is not empty, it specifies the target computer to search.
+func (sid *SID) LookupAccount(system string) (account, domain string, accType uint32, err error) {
+	var sys *uint16
+	if len(system) > 0 {
+		sys, err = UTF16PtrFromString(system)
+		if err != nil {
+			return "", "", 0, err
+		}
+	}
+	n := uint32(50)
+	dn := uint32(50)
+	for {
+		b := make([]uint16, n)
+		db := make([]uint16, dn)
+		e := LookupAccountSid(sys, sid, &b[0], &n, &db[0], &dn, &accType)
+		if e == nil {
+			return UTF16ToString(b), UTF16ToString(db), accType, nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return "", "", 0, e
+		}
+		if n <= uint32(len(b)) {
+			return "", "", 0, e
+		}
+	}
+}
+
+const (
+	// do not reorder
+	TOKEN_ASSIGN_PRIMARY = 1 << iota
+	TOKEN_DUPLICATE
+	TOKEN_IMPERSONATE
+	TOKEN_QUERY
+	TOKEN_QUERY_SOURCE
+	TOKEN_ADJUST_PRIVILEGES
+	TOKEN_ADJUST_GROUPS
+	TOKEN_ADJUST_DEFAULT
+
+	TOKEN_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED |
+		TOKEN_ASSIGN_PRIMARY |
+		TOKEN_DUPLICATE |
+		TOKEN_IMPERSONATE |
+		TOKEN_QUERY |
+		TOKEN_QUERY_SOURCE |
+		TOKEN_ADJUST_PRIVILEGES |
+		TOKEN_ADJUST_GROUPS |
+		TOKEN_ADJUST_DEFAULT
+	TOKEN_READ  = STANDARD_RIGHTS_READ | TOKEN_QUERY
+	TOKEN_WRITE = STANDARD_RIGHTS_WRITE |
+		TOKEN_ADJUST_PRIVILEGES |
+		TOKEN_ADJUST_GROUPS |
+		TOKEN_ADJUST_DEFAULT
+	TOKEN_EXECUTE = STANDARD_RIGHTS_EXECUTE
+)
+
+const (
+	// do not reorder
+	TokenUser = 1 + iota
+	TokenGroups
+	TokenPrivileges
+	TokenOwner
+	TokenPrimaryGroup
+	TokenDefaultDacl
+	TokenSource
+	TokenType
+	TokenImpersonationLevel
+	TokenStatistics
+	TokenRestrictedSids
+	TokenSessionId
+	TokenGroupsAndPrivileges
+	TokenSessionReference
+	TokenSandBoxInert
+	TokenAuditPolicy
+	TokenOrigin
+	TokenElevationType
+	TokenLinkedToken
+	TokenElevation
+	TokenHasRestrictions
+	TokenAccessInformation
+	TokenVirtualizationAllowed
+	TokenVirtualizationEnabled
+	TokenIntegrityLevel
+	TokenUIAccess
+	TokenMandatoryPolicy
+	TokenLogonSid
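+	// MaxTokenInfoClass is one greater than the last valid class value
+	// above (the list starts at TokenUser = 1 and must stay in order).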
+	MaxTokenInfoClass
+)
+
+type SIDAndAttributes struct {
+	Sid        *SID
+	Attributes uint32
+}
+
+type Tokenuser struct {
+	User SIDAndAttributes
+}
+
+type Tokenprimarygroup struct {
+	PrimaryGroup *SID
+}
+
+type Tokengroups struct {
+	GroupCount uint32
+	Groups     [1]SIDAndAttributes
+}
+
+// Authorization Functions
+//sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership
+//sys OpenProcessToken(h Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken
+//sys GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) = advapi32.GetTokenInformation
+//sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW
+
+// An access token contains the security information for a logon session.
+// The system creates an access token when a user logs on, and every
+// process executed on behalf of the user has a copy of the token.
+// The token identifies the user, the user's groups, and the user's
+// privileges. The system uses the token to control access to securable
+// objects and to control the ability of the user to perform various
+// system-related operations on the local computer.
+type Token Handle
+
+// OpenCurrentProcessToken opens the access token
+// associated with the current process.
+func OpenCurrentProcessToken() (Token, error) {
+	p, e := GetCurrentProcess()
+	if e != nil {
+		return 0, e
+	}
+	var t Token
+	e = OpenProcessToken(p, TOKEN_QUERY, &t)
+	if e != nil {
+		return 0, e
+	}
+	return t, nil
+}
+
+// Close releases access to the access token.
+func (t Token) Close() error {
+	return CloseHandle(Handle(t))
+}
+
+// getInfo retrieves a specified type of information about an access token.
+func (t Token) getInfo(class uint32, initSize int) (unsafe.Pointer, error) {
+	n := uint32(initSize)
+	for {
+		b := make([]byte, n)
+		e := GetTokenInformation(t, class, &b[0], uint32(len(b)), &n)
+		if e == nil {
+			return unsafe.Pointer(&b[0]), nil
+		}
+		if e != ERROR_INSUFFICIENT_BUFFER {
+			return nil, e
+		}
+		if n <= uint32(len(b)) {
+			return nil, e
+		}
+	}
+}
+
+// GetTokenUser retrieves the user account information of access token t.
+func (t Token) GetTokenUser() (*Tokenuser, error) {
+	i, e := t.getInfo(TokenUser, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokenuser)(i), nil
+}
+
+// GetTokenGroups retrieves the group accounts associated with access token t.
+func (t Token) GetTokenGroups() (*Tokengroups, error) {
+	i, e := t.getInfo(TokenGroups, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokengroups)(i), nil
+}
+
+// GetTokenPrimaryGroup retrieves the primary group information of access
+// token t: a pointer to a SID structure representing a group that will
+// become the primary group of any objects created by a process using
+// this access token.
+func (t Token) GetTokenPrimaryGroup() (*Tokenprimarygroup, error) {
+	i, e := t.getInfo(TokenPrimaryGroup, 50)
+	if e != nil {
+		return nil, e
+	}
+	return (*Tokenprimarygroup)(i), nil
+}
+
+// GetUserProfileDirectory retrieves the path to the root directory of
+// the profile of the user associated with access token t.
+func (t Token) GetUserProfileDirectory() (string, error) { + n := uint32(100) + for { + b := make([]uint16, n) + e := GetUserProfileDirectory(t, &b[0], &n) + if e == nil { + return UTF16ToString(b), nil + } + if e != ERROR_INSUFFICIENT_BUFFER { + return "", e + } + if n <= uint32(len(b)) { + return "", e + } + } +} + +// IsMember reports whether the access token t is a member of the provided SID. +func (t Token) IsMember(sid *SID) (bool, error) { + var b int32 + if e := checkTokenMembership(t, sid, &b); e != nil { + return false, e + } + return b != 0, nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go new file mode 100644 index 0000000000..a500dd7dfc --- /dev/null +++ b/vendor/golang.org/x/sys/windows/service.go @@ -0,0 +1,164 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +const ( + SC_MANAGER_CONNECT = 1 + SC_MANAGER_CREATE_SERVICE = 2 + SC_MANAGER_ENUMERATE_SERVICE = 4 + SC_MANAGER_LOCK = 8 + SC_MANAGER_QUERY_LOCK_STATUS = 16 + SC_MANAGER_MODIFY_BOOT_CONFIG = 32 + SC_MANAGER_ALL_ACCESS = 0xf003f +) + +//sys OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenSCManagerW + +const ( + SERVICE_KERNEL_DRIVER = 1 + SERVICE_FILE_SYSTEM_DRIVER = 2 + SERVICE_ADAPTER = 4 + SERVICE_RECOGNIZER_DRIVER = 8 + SERVICE_WIN32_OWN_PROCESS = 16 + SERVICE_WIN32_SHARE_PROCESS = 32 + SERVICE_WIN32 = SERVICE_WIN32_OWN_PROCESS | SERVICE_WIN32_SHARE_PROCESS + SERVICE_INTERACTIVE_PROCESS = 256 + SERVICE_DRIVER = SERVICE_KERNEL_DRIVER | SERVICE_FILE_SYSTEM_DRIVER | SERVICE_RECOGNIZER_DRIVER + SERVICE_TYPE_ALL = SERVICE_WIN32 | SERVICE_ADAPTER | SERVICE_DRIVER | SERVICE_INTERACTIVE_PROCESS + + SERVICE_BOOT_START = 0 + SERVICE_SYSTEM_START = 1 + SERVICE_AUTO_START = 2 + SERVICE_DEMAND_START = 3 + SERVICE_DISABLED = 4 + + SERVICE_ERROR_IGNORE = 0 + SERVICE_ERROR_NORMAL = 1 + SERVICE_ERROR_SEVERE = 2 + SERVICE_ERROR_CRITICAL = 3 + + SC_STATUS_PROCESS_INFO = 0 + + SERVICE_STOPPED = 1 + SERVICE_START_PENDING = 2 + SERVICE_STOP_PENDING = 3 + SERVICE_RUNNING = 4 + SERVICE_CONTINUE_PENDING = 5 + SERVICE_PAUSE_PENDING = 6 + SERVICE_PAUSED = 7 + SERVICE_NO_CHANGE = 0xffffffff + + SERVICE_ACCEPT_STOP = 1 + SERVICE_ACCEPT_PAUSE_CONTINUE = 2 + SERVICE_ACCEPT_SHUTDOWN = 4 + SERVICE_ACCEPT_PARAMCHANGE = 8 + SERVICE_ACCEPT_NETBINDCHANGE = 16 + SERVICE_ACCEPT_HARDWAREPROFILECHANGE = 32 + SERVICE_ACCEPT_POWEREVENT = 64 + SERVICE_ACCEPT_SESSIONCHANGE = 128 + + SERVICE_CONTROL_STOP = 1 + SERVICE_CONTROL_PAUSE = 2 + SERVICE_CONTROL_CONTINUE = 3 + SERVICE_CONTROL_INTERROGATE = 4 + SERVICE_CONTROL_SHUTDOWN = 5 + SERVICE_CONTROL_PARAMCHANGE = 6 + SERVICE_CONTROL_NETBINDADD = 7 + SERVICE_CONTROL_NETBINDREMOVE = 8 + SERVICE_CONTROL_NETBINDENABLE = 9 + SERVICE_CONTROL_NETBINDDISABLE = 10 + SERVICE_CONTROL_DEVICEEVENT = 11 + SERVICE_CONTROL_HARDWAREPROFILECHANGE = 12 + SERVICE_CONTROL_POWEREVENT = 13 + SERVICE_CONTROL_SESSIONCHANGE = 14 + + SERVICE_ACTIVE = 1 + SERVICE_INACTIVE = 2 + SERVICE_STATE_ALL = 3 + + SERVICE_QUERY_CONFIG = 1 + SERVICE_CHANGE_CONFIG = 2 + SERVICE_QUERY_STATUS = 4 + SERVICE_ENUMERATE_DEPENDENTS = 8 + SERVICE_START = 16 + SERVICE_STOP = 32 + SERVICE_PAUSE_CONTINUE = 64 + SERVICE_INTERROGATE = 128 + SERVICE_USER_DEFINED_CONTROL = 256 + SERVICE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SERVICE_QUERY_CONFIG | SERVICE_CHANGE_CONFIG | 
SERVICE_QUERY_STATUS | SERVICE_ENUMERATE_DEPENDENTS | SERVICE_START | SERVICE_STOP | SERVICE_PAUSE_CONTINUE | SERVICE_INTERROGATE | SERVICE_USER_DEFINED_CONTROL + SERVICE_RUNS_IN_SYSTEM_PROCESS = 1 + SERVICE_CONFIG_DESCRIPTION = 1 + SERVICE_CONFIG_FAILURE_ACTIONS = 2 + + NO_ERROR = 0 + + SC_ENUM_PROCESS_INFO = 0 +) + +type SERVICE_STATUS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 +} + +type SERVICE_TABLE_ENTRY struct { + ServiceName *uint16 + ServiceProc uintptr +} + +type QUERY_SERVICE_CONFIG struct { + ServiceType uint32 + StartType uint32 + ErrorControl uint32 + BinaryPathName *uint16 + LoadOrderGroup *uint16 + TagId uint32 + Dependencies *uint16 + ServiceStartName *uint16 + DisplayName *uint16 +} + +type SERVICE_DESCRIPTION struct { + Description *uint16 +} + +type SERVICE_STATUS_PROCESS struct { + ServiceType uint32 + CurrentState uint32 + ControlsAccepted uint32 + Win32ExitCode uint32 + ServiceSpecificExitCode uint32 + CheckPoint uint32 + WaitHint uint32 + ProcessId uint32 + ServiceFlags uint32 +} + +type ENUM_SERVICE_STATUS_PROCESS struct { + ServiceName *uint16 + DisplayName *uint16 + ServiceStatusProcess SERVICE_STATUS_PROCESS +} + +//sys CloseServiceHandle(handle Handle) (err error) = advapi32.CloseServiceHandle +//sys CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) [failretval==0] = advapi32.CreateServiceW +//sys OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) [failretval==0] = advapi32.OpenServiceW +//sys DeleteService(service Handle) (err error) = advapi32.DeleteService +//sys StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) = advapi32.StartServiceW +//sys QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) = advapi32.QueryServiceStatus +//sys ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) = advapi32.ControlService +//sys StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) = advapi32.StartServiceCtrlDispatcherW +//sys SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) = advapi32.SetServiceStatus +//sys ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) = advapi32.ChangeServiceConfigW +//sys QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfigW +//sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W +//sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W +//sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW diff --git a/vendor/golang.org/x/sys/windows/str.go 
b/vendor/golang.org/x/sys/windows/str.go new file mode 100644 index 0000000000..917cc2aae4 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/str.go @@ -0,0 +1,22 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows + +func itoa(val int) string { // do it here rather than with fmt to avoid dependency + if val < 0 { + return "-" + itoa(-val) + } + var buf [32]byte // big enough for int64 + i := len(buf) - 1 + for val >= 10 { + buf[i] = byte(val%10 + '0') + i-- + val /= 10 + } + buf[i] = byte(val + '0') + return string(buf[i:]) +} diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go new file mode 100644 index 0000000000..b07bc2305d --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -0,0 +1,71 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package windows contains an interface to the low-level operating system +// primitives. OS details vary depending on the underlying system, and +// by default, godoc will display the OS-specific documentation for the current +// system. If you want godoc to display syscall documentation for another +// system, set $GOOS and $GOARCH to the desired system. For example, if +// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS +// to freebsd and $GOARCH to arm. +// The primary use of this package is inside other packages that provide a more +// portable interface to the system, such as "os", "time" and "net". Use +// those packages rather than this one if you can. +// For details of the functions and data types in this package consult +// the manuals for the appropriate operating system. +// These calls return err == nil to indicate success; otherwise +// err represents an operating system error describing the failure and +// holds a value of type syscall.Errno. +package windows // import "golang.org/x/sys/windows" + +import ( + "syscall" +) + +// ByteSliceFromString returns a NUL-terminated slice of bytes +// containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func ByteSliceFromString(s string) ([]byte, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + a := make([]byte, len(s)+1) + copy(a, s) + return a, nil +} + +// BytePtrFromString returns a pointer to a NUL-terminated array of +// bytes containing the text of s. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func BytePtrFromString(s string) (*byte, error) { + a, err := ByteSliceFromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +// Single-word zero for use when we need a valid pointer to 0 bytes. +// See mksyscall.pl. 
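+
+// A quick sketch of the NUL-termination helpers above:
+//
+//	b, _ := windows.ByteSliceFromString("abc") // []byte{'a', 'b', 'c', 0}
+//	p, _ := windows.BytePtrFromString("abc")   // *byte to the same NUL-terminated layout
+//	_, err := windows.ByteSliceFromString("a\x00b")
+//	// err == syscall.EINVAL: embedded NUL bytes are rejected
+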
+var _zero uintptr + +func (ts *Timespec) Unix() (sec int64, nsec int64) { + return int64(ts.Sec), int64(ts.Nsec) +} + +func (tv *Timeval) Unix() (sec int64, nsec int64) { + return int64(tv.Sec), int64(tv.Usec) * 1000 +} + +func (ts *Timespec) Nano() int64 { + return int64(ts.Sec)*1e9 + int64(ts.Nsec) +} + +func (tv *Timeval) Nano() int64 { + return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 +} diff --git a/vendor/golang.org/x/sys/windows/syscall_test.go b/vendor/golang.org/x/sys/windows/syscall_test.go new file mode 100644 index 0000000000..d7009e44a5 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_test.go @@ -0,0 +1,53 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +package windows_test + +import ( + "syscall" + "testing" + + "golang.org/x/sys/windows" +) + +func testSetGetenv(t *testing.T, key, value string) { + err := windows.Setenv(key, value) + if err != nil { + t.Fatalf("Setenv failed to set %q: %v", value, err) + } + newvalue, found := windows.Getenv(key) + if !found { + t.Fatalf("Getenv failed to find %v variable (want value %q)", key, value) + } + if newvalue != value { + t.Fatalf("Getenv(%v) = %q; want %q", key, newvalue, value) + } +} + +func TestEnv(t *testing.T) { + testSetGetenv(t, "TESTENV", "AVALUE") + // make sure TESTENV gets set to "", not deleted + testSetGetenv(t, "TESTENV", "") +} + +func TestGetProcAddressByOrdinal(t *testing.T) { + // Attempt calling shlwapi.dll:IsOS, resolving it by ordinal, as + // suggested in + // https://msdn.microsoft.com/en-us/library/windows/desktop/bb773795.aspx + h, err := windows.LoadLibrary("shlwapi.dll") + if err != nil { + t.Fatalf("Failed to load shlwapi.dll: %s", err) + } + procIsOS, err := windows.GetProcAddressByOrdinal(h, 437) + if err != nil { + t.Fatalf("Could not find shlwapi.dll:IsOS by ordinal: %s", err) + } + const OS_NT = 1 + r, _, _ := syscall.Syscall(procIsOS, 1, OS_NT, 0, 0) + if r == 0 { + t.Error("shlwapi.dll:IsOS(OS_NT) returned 0, expected non-zero value") + } +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go new file mode 100644 index 0000000000..e0da2aa083 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -0,0 +1,1153 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Windows system calls. + +package windows + +import ( + errorspkg "errors" + "sync" + "syscall" + "unicode/utf16" + "unsafe" +) + +type Handle uintptr + +const ( + InvalidHandle = ^Handle(0) + + // Flags for DefineDosDevice. + DDD_EXACT_MATCH_ON_REMOVE = 0x00000004 + DDD_NO_BROADCAST_SYSTEM = 0x00000008 + DDD_RAW_TARGET_PATH = 0x00000001 + DDD_REMOVE_DEFINITION = 0x00000002 + + // Return values for GetDriveType. + DRIVE_UNKNOWN = 0 + DRIVE_NO_ROOT_DIR = 1 + DRIVE_REMOVABLE = 2 + DRIVE_FIXED = 3 + DRIVE_REMOTE = 4 + DRIVE_CDROM = 5 + DRIVE_RAMDISK = 6 + + // File system flags from GetVolumeInformation and GetVolumeInformationByHandle. 
+ FILE_CASE_SENSITIVE_SEARCH = 0x00000001 + FILE_CASE_PRESERVED_NAMES = 0x00000002 + FILE_FILE_COMPRESSION = 0x00000010 + FILE_DAX_VOLUME = 0x20000000 + FILE_NAMED_STREAMS = 0x00040000 + FILE_PERSISTENT_ACLS = 0x00000008 + FILE_READ_ONLY_VOLUME = 0x00080000 + FILE_SEQUENTIAL_WRITE_ONCE = 0x00100000 + FILE_SUPPORTS_ENCRYPTION = 0x00020000 + FILE_SUPPORTS_EXTENDED_ATTRIBUTES = 0x00800000 + FILE_SUPPORTS_HARD_LINKS = 0x00400000 + FILE_SUPPORTS_OBJECT_IDS = 0x00010000 + FILE_SUPPORTS_OPEN_BY_FILE_ID = 0x01000000 + FILE_SUPPORTS_REPARSE_POINTS = 0x00000080 + FILE_SUPPORTS_SPARSE_FILES = 0x00000040 + FILE_SUPPORTS_TRANSACTIONS = 0x00200000 + FILE_SUPPORTS_USN_JOURNAL = 0x02000000 + FILE_UNICODE_ON_DISK = 0x00000004 + FILE_VOLUME_IS_COMPRESSED = 0x00008000 + FILE_VOLUME_QUOTAS = 0x00000020 +) + +// StringToUTF16 is deprecated. Use UTF16FromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16(s string) []uint16 { + a, err := UTF16FromString(s) + if err != nil { + panic("windows: string with NUL passed to StringToUTF16") + } + return a +} + +// UTF16FromString returns the UTF-16 encoding of the UTF-8 string +// s, with a terminating NUL added. If s contains a NUL byte at any +// location, it returns (nil, syscall.EINVAL). +func UTF16FromString(s string) ([]uint16, error) { + for i := 0; i < len(s); i++ { + if s[i] == 0 { + return nil, syscall.EINVAL + } + } + return utf16.Encode([]rune(s + "\x00")), nil +} + +// UTF16ToString returns the UTF-8 encoding of the UTF-16 sequence s, +// with a terminating NUL removed. +func UTF16ToString(s []uint16) string { + for i, v := range s { + if v == 0 { + s = s[0:i] + break + } + } + return string(utf16.Decode(s)) +} + +// StringToUTF16Ptr is deprecated. Use UTF16PtrFromString instead. +// If s contains a NUL byte this function panics instead of +// returning an error. +func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } + +// UTF16PtrFromString returns pointer to the UTF-16 encoding of +// the UTF-8 string s, with a terminating NUL added. If s +// contains a NUL byte at any location, it returns (nil, syscall.EINVAL). +func UTF16PtrFromString(s string) (*uint16, error) { + a, err := UTF16FromString(s) + if err != nil { + return nil, err + } + return &a[0], nil +} + +func Getpagesize() int { return 4096 } + +// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention. +// This is useful when interoperating with Windows code requiring callbacks. +func NewCallback(fn interface{}) uintptr { + return syscall.NewCallback(fn) +} + +// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention. +// This is useful when interoperating with Windows code requiring callbacks. 
+func NewCallbackCDecl(fn interface{}) uintptr { + return syscall.NewCallbackCDecl(fn) +} + +// windows api calls + +//sys GetLastError() (lasterr error) +//sys LoadLibrary(libname string) (handle Handle, err error) = LoadLibraryW +//sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW +//sys FreeLibrary(handle Handle) (err error) +//sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetVersion() (ver uint32, err error) +//sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW +//sys ExitProcess(exitcode uint32) +//sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW +//sys ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) +//sys SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) [failretval==0xffffffff] +//sys CloseHandle(handle Handle) (err error) +//sys GetStdHandle(stdhandle uint32) (handle Handle, err error) [failretval==InvalidHandle] +//sys SetStdHandle(stdhandle uint32, handle Handle) (err error) +//sys findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstFileW +//sys findNextFile1(handle Handle, data *win32finddata1) (err error) = FindNextFileW +//sys FindClose(handle Handle) (err error) +//sys GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) +//sys GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) = GetCurrentDirectoryW +//sys SetCurrentDirectory(path *uint16) (err error) = SetCurrentDirectoryW +//sys CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) = CreateDirectoryW +//sys RemoveDirectory(path *uint16) (err error) = RemoveDirectoryW +//sys DeleteFile(path *uint16) (err error) = DeleteFileW +//sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW +//sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW +//sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW +//sys SetEndOfFile(handle Handle) (err error) +//sys GetSystemTimeAsFileTime(time *Filetime) +//sys GetSystemTimePreciseAsFileTime(time *Filetime) +//sys GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) [failretval==0xffffffff] +//sys CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) +//sys GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) +//sys PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) +//sys CancelIo(s Handle) (err error) +//sys CancelIoEx(s Handle, o *Overlapped) (err error) +//sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) = CreateProcessW +//sys OpenProcess(da uint32, inheritHandle 
bool, pid uint32) (handle Handle, err error) +//sys TerminateProcess(handle Handle, exitcode uint32) (err error) +//sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) +//sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW +//sys GetCurrentProcess() (pseudoHandle Handle, err error) +//sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) +//sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) +//sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] +//sys GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPathW +//sys CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) +//sys GetFileType(filehandle Handle) (n uint32, err error) +//sys CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) = advapi32.CryptAcquireContextW +//sys CryptReleaseContext(provhandle Handle, flags uint32) (err error) = advapi32.CryptReleaseContext +//sys CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) = advapi32.CryptGenRandom +//sys GetEnvironmentStrings() (envs *uint16, err error) [failretval==nil] = kernel32.GetEnvironmentStringsW +//sys FreeEnvironmentStrings(envs *uint16) (err error) = kernel32.FreeEnvironmentStringsW +//sys GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) = kernel32.GetEnvironmentVariableW +//sys SetEnvironmentVariable(name *uint16, value *uint16) (err error) = kernel32.SetEnvironmentVariableW +//sys SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) +//sys GetFileAttributes(name *uint16) (attrs uint32, err error) [failretval==INVALID_FILE_ATTRIBUTES] = kernel32.GetFileAttributesW +//sys SetFileAttributes(name *uint16, attrs uint32) (err error) = kernel32.SetFileAttributesW +//sys GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) = kernel32.GetFileAttributesExW +//sys GetCommandLine() (cmd *uint16) = kernel32.GetCommandLineW +//sys CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) [failretval==nil] = shell32.CommandLineToArgvW +//sys LocalFree(hmem Handle) (handle Handle, err error) [failretval!=0] +//sys SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) +//sys FlushFileBuffers(handle Handle) (err error) +//sys GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) = kernel32.GetFullPathNameW +//sys GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) = kernel32.GetLongPathNameW +//sys GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) = kernel32.GetShortPathNameW +//sys CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) = kernel32.CreateFileMappingW +//sys MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) +//sys UnmapViewOfFile(addr uintptr) (err error) +//sys FlushViewOfFile(addr uintptr, length uintptr) (err error) +//sys VirtualLock(addr uintptr, length uintptr) (err error) +//sys 
VirtualUnlock(addr uintptr, length uintptr) (err error) +//sys VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) = kernel32.VirtualAlloc +//sys VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) = kernel32.VirtualFree +//sys VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) = kernel32.VirtualProtect +//sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile +//sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW +//sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore +//sys CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore +//sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore +//sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore +//sys CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) = crypt32.CertGetCertificateChain +//sys CertFreeCertificateChain(ctx *CertChainContext) = crypt32.CertFreeCertificateChain +//sys CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) [failretval==nil] = crypt32.CertCreateCertificateContext +//sys CertFreeCertificateContext(ctx *CertContext) (err error) = crypt32.CertFreeCertificateContext +//sys CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) = crypt32.CertVerifyCertificateChainPolicy +//sys RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) = advapi32.RegOpenKeyExW +//sys RegCloseKey(key Handle) (regerrno error) = advapi32.RegCloseKey +//sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW +//sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW +//sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys getCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId +//sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode +//sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode +//sys GetConsoleScreenBufferInfo(console 
Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo +//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW +//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW +//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot +//sys Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32FirstW +//sys Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) = kernel32.Process32NextW +//sys DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) +// This function returns 1 byte BOOLEAN rather than the 4 byte BOOL. +//sys CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) [failretval&0xff==0] = CreateSymbolicLinkW +//sys CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) [failretval&0xff==0] = CreateHardLinkW +//sys GetCurrentThreadId() (id uint32) +//sys CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) = kernel32.CreateEventW +//sys CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateEventExW +//sys OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenEventW +//sys SetEvent(event Handle) (err error) = kernel32.SetEvent +//sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent +//sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent + +// Volume Management Functions +//sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW +//sys DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) = DeleteVolumeMountPointW +//sys FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeW +//sys FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) [failretval==InvalidHandle] = FindFirstVolumeMountPointW +//sys FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) = FindNextVolumeW +//sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW +//sys FindVolumeClose(findVolume Handle) (err error) +//sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDriveType(rootPathName *uint16) (driveType uint32) +//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] +//sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW +//sys GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationW +//sys GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber 
*uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) = GetVolumeInformationByHandleW +//sys GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) = GetVolumeNameForVolumeMountPointW +//sys GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) = GetVolumePathNameW +//sys GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) = GetVolumePathNamesForVolumeNameW +//sys QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) [failretval==0] = QueryDosDeviceW +//sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW +//sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW + +// syscall interface implementation for other packages + +// GetProcAddressByOrdinal retrieves the address of the exported +// function from module by ordinal. +func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Exit(code int) { ExitProcess(uint32(code)) } + +func makeInheritSa() *SecurityAttributes { + var sa SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func Open(path string, mode int, perm uint32) (fd Handle, err error) { + if len(path) == 0 { + return InvalidHandle, ERROR_FILE_NOT_FOUND + } + pathp, err := UTF16PtrFromString(path) + if err != nil { + return InvalidHandle, err + } + var access uint32 + switch mode & (O_RDONLY | O_WRONLY | O_RDWR) { + case O_RDONLY: + access = GENERIC_READ + case O_WRONLY: + access = GENERIC_WRITE + case O_RDWR: + access = GENERIC_READ | GENERIC_WRITE + } + if mode&O_CREAT != 0 { + access |= GENERIC_WRITE + } + if mode&O_APPEND != 0 { + access &^= GENERIC_WRITE + access |= FILE_APPEND_DATA + } + sharemode := uint32(FILE_SHARE_READ | FILE_SHARE_WRITE) + var sa *SecurityAttributes + if mode&O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(O_CREAT|O_EXCL) == (O_CREAT | O_EXCL): + createmode = CREATE_NEW + case mode&(O_CREAT|O_TRUNC) == (O_CREAT | O_TRUNC): + createmode = CREATE_ALWAYS + case mode&O_CREAT == O_CREAT: + createmode = OPEN_ALWAYS + case mode&O_TRUNC == O_TRUNC: + createmode = TRUNCATE_EXISTING + default: + createmode = OPEN_EXISTING + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, FILE_ATTRIBUTE_NORMAL, 0) + return h, e +} + +func Read(fd Handle, p []byte) (n int, err error) { + var done uint32 + e := ReadFile(fd, p, &done, nil) + if e != nil { + if e == ERROR_BROKEN_PIPE { + // NOTE(brainman): work around ERROR_BROKEN_PIPE is returned on reading EOF from stdin + return 0, nil + } + return 0, e + } + if raceenabled { + if done > 0 { + raceWriteRange(unsafe.Pointer(&p[0]), int(done)) + } + raceAcquire(unsafe.Pointer(&ioSync)) + } + return int(done), nil +} + +func Write(fd Handle, p []byte) (n int, err error) { + if raceenabled { + raceReleaseMerge(unsafe.Pointer(&ioSync)) + } + var done uint32 + e := WriteFile(fd, p, &done, nil) + if e != nil { + return 0, e + } + if raceenabled && done > 0 { + raceReadRange(unsafe.Pointer(&p[0]), 
int(done)) + } + return int(done), nil +} + +var ioSync int64 + +func Seek(fd Handle, offset int64, whence int) (newoffset int64, err error) { + var w uint32 + switch whence { + case 0: + w = FILE_BEGIN + case 1: + w = FILE_CURRENT + case 2: + w = FILE_END + } + hi := int32(offset >> 32) + lo := int32(offset) + // use GetFileType to check pipe, pipe can't do seek + ft, _ := GetFileType(fd) + if ft == FILE_TYPE_PIPE { + return 0, syscall.EPIPE + } + rlo, e := SetFilePointer(fd, lo, &hi, w) + if e != nil { + return 0, e + } + return int64(hi)<<32 + int64(rlo), nil +} + +func Close(fd Handle) (err error) { + return CloseHandle(fd) +} + +var ( + Stdin = getStdHandle(STD_INPUT_HANDLE) + Stdout = getStdHandle(STD_OUTPUT_HANDLE) + Stderr = getStdHandle(STD_ERROR_HANDLE) +) + +func getStdHandle(stdhandle uint32) (fd Handle) { + r, _ := GetStdHandle(stdhandle) + CloseOnExec(r) + return r +} + +const ImplementsGetwd = true + +func Getwd() (wd string, err error) { + b := make([]uint16, 300) + n, e := GetCurrentDirectory(uint32(len(b)), &b[0]) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Chdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return SetCurrentDirectory(pathp) +} + +func Mkdir(path string, mode uint32) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return CreateDirectory(pathp, nil) +} + +func Rmdir(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return RemoveDirectory(pathp) +} + +func Unlink(path string) (err error) { + pathp, err := UTF16PtrFromString(path) + if err != nil { + return err + } + return DeleteFile(pathp) +} + +func Rename(oldpath, newpath string) (err error) { + from, err := UTF16PtrFromString(oldpath) + if err != nil { + return err + } + to, err := UTF16PtrFromString(newpath) + if err != nil { + return err + } + return MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING) +} + +func ComputerName() (name string, err error) { + var n uint32 = MAX_COMPUTERNAME_LENGTH + 1 + b := make([]uint16, n) + e := GetComputerName(&b[0], &n) + if e != nil { + return "", e + } + return string(utf16.Decode(b[0:n])), nil +} + +func Ftruncate(fd Handle, length int64) (err error) { + curoffset, e := Seek(fd, 0, 1) + if e != nil { + return e + } + defer Seek(fd, curoffset, 0) + _, e = Seek(fd, length, 0) + if e != nil { + return e + } + e = SetEndOfFile(fd) + if e != nil { + return e + } + return nil +} + +func Gettimeofday(tv *Timeval) (err error) { + var ft Filetime + GetSystemTimeAsFileTime(&ft) + *tv = NsecToTimeval(ft.Nanoseconds()) + return nil +} + +func Pipe(p []Handle) (err error) { + if len(p) != 2 { + return syscall.EINVAL + } + var r, w Handle + e := CreatePipe(&r, &w, makeInheritSa(), 0) + if e != nil { + return e + } + p[0] = r + p[1] = w + return nil +} + +func Utimes(path string, tv []Timeval) (err error) { + if len(tv) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(tv[0].Nanoseconds()) + w := NsecToFiletime(tv[1].Nanoseconds()) + return SetFileTime(h, nil, &a, &w) +} + +func UtimesNano(path string, ts []Timespec) (err error) { + if len(ts) != 2 { + return syscall.EINVAL + } + pathp, e := UTF16PtrFromString(path) + if e != nil { + 
return e + } + h, e := CreateFile(pathp, + FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, nil, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer Close(h) + a := NsecToFiletime(TimespecToNsec(ts[0])) + w := NsecToFiletime(TimespecToNsec(ts[1])) + return SetFileTime(h, nil, &a, &w) +} + +func Fsync(fd Handle) (err error) { + return FlushFileBuffers(fd) +} + +func Chmod(path string, mode uint32) (err error) { + if mode == 0 { + return syscall.EINVAL + } + p, e := UTF16PtrFromString(path) + if e != nil { + return e + } + attrs, e := GetFileAttributes(p) + if e != nil { + return e + } + if mode&S_IWRITE != 0 { + attrs &^= FILE_ATTRIBUTE_READONLY + } else { + attrs |= FILE_ATTRIBUTE_READONLY + } + return SetFileAttributes(p, attrs) +} + +func LoadGetSystemTimePreciseAsFileTime() error { + return procGetSystemTimePreciseAsFileTime.Find() +} + +func LoadCancelIoEx() error { + return procCancelIoEx.Find() +} + +func LoadSetFileCompletionNotificationModes() error { + return procSetFileCompletionNotificationModes.Find() +} + +// net api calls + +const socket_error = uintptr(^uint32(0)) + +//sys WSAStartup(verreq uint32, data *WSAData) (sockerr error) = ws2_32.WSAStartup +//sys WSACleanup() (err error) [failretval==socket_error] = ws2_32.WSACleanup +//sys WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) [failretval==socket_error] = ws2_32.WSAIoctl +//sys socket(af int32, typ int32, protocol int32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.socket +//sys Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) [failretval==socket_error] = ws2_32.setsockopt +//sys Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockopt +//sys bind(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.bind +//sys connect(s Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socket_error] = ws2_32.connect +//sys getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getsockname +//sys getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) [failretval==socket_error] = ws2_32.getpeername +//sys listen(s Handle, backlog int32) (err error) [failretval==socket_error] = ws2_32.listen +//sys shutdown(s Handle, how int32) (err error) [failretval==socket_error] = ws2_32.shutdown +//sys Closesocket(s Handle) (err error) [failretval==socket_error] = ws2_32.closesocket +//sys AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) = mswsock.AcceptEx +//sys GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) = mswsock.GetAcceptExSockaddrs +//sys WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecv +//sys WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASend +//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, 
croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom +//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo +//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname +//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname +//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs +//sys GetProtoByName(name string) (p *Protoent, err error) [failretval==nil] = ws2_32.getprotobyname +//sys DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) = dnsapi.DnsQuery_W +//sys DnsRecordListFree(rl *DNSRecord, freetype uint32) = dnsapi.DnsRecordListFree +//sys DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) = dnsapi.DnsNameCompare_W +//sys GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) = ws2_32.GetAddrInfoW +//sys FreeAddrInfoW(addrinfo *AddrinfoW) = ws2_32.FreeAddrInfoW +//sys GetIfEntry(pIfRow *MibIfRow) (errcode error) = iphlpapi.GetIfEntry +//sys GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) = iphlpapi.GetAdaptersInfo +//sys SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) = kernel32.SetFileCompletionNotificationModes +//sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW +//sys GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) = iphlpapi.GetAdaptersAddresses +//sys GetACP() (acp uint32) = kernel32.GetACP +//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar + +// For testing: clients can set this flag to force +// creation of IPv6 sockets to return EAFNOSUPPORT. 
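+// A minimal usage sketch (editor's illustration, not part of the vendored
+// source; it assumes a test that wants IPv6 socket creation to fail):
+//
+//	windows.SocketDisableIPv6 = true
+//	defer func() { windows.SocketDisableIPv6 = false }()
+//	_, err := windows.Socket(windows.AF_INET6, windows.SOCK_STREAM, windows.IPPROTO_TCP)
+//	// while the flag is set, err is syscall.EAFNOSUPPORT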
+var SocketDisableIPv6 bool + +type RawSockaddrInet4 struct { + Family uint16 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]uint8 +} + +type RawSockaddrInet6 struct { + Family uint16 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +type RawSockaddr struct { + Family uint16 + Data [14]int8 +} + +type RawSockaddrAny struct { + Addr RawSockaddr + Pad [96]int8 +} + +type Sockaddr interface { + sockaddr() (ptr unsafe.Pointer, len int32, err error) // lowercase; only we can define Sockaddrs +} + +type SockaddrInet4 struct { + Port int + Addr [4]byte + raw RawSockaddrInet4 +} + +func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrInet6 struct { + Port int + ZoneId uint32 + Addr [16]byte + raw RawSockaddrInet6 +} + +func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, int32, error) { + if sa.Port < 0 || sa.Port > 0xFFFF { + return nil, 0, syscall.EINVAL + } + sa.raw.Family = AF_INET6 + p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port)) + p[0] = byte(sa.Port >> 8) + p[1] = byte(sa.Port) + sa.raw.Scope_id = sa.ZoneId + for i := 0; i < len(sa.Addr); i++ { + sa.raw.Addr[i] = sa.Addr[i] + } + return unsafe.Pointer(&sa.raw), int32(unsafe.Sizeof(sa.raw)), nil +} + +type SockaddrUnix struct { + Name string +} + +func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, int32, error) { + // TODO(brainman): implement SockaddrUnix.sockaddr() + return nil, 0, syscall.EWINDOWS +} + +func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { + switch rsa.Addr.Family { + case AF_UNIX: + return nil, syscall.EWINDOWS + + case AF_INET: + pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet4) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + + case AF_INET6: + pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa)) + sa := new(SockaddrInet6) + p := (*[2]byte)(unsafe.Pointer(&pp.Port)) + sa.Port = int(p[0])<<8 + int(p[1]) + sa.ZoneId = pp.Scope_id + for i := 0; i < len(sa.Addr); i++ { + sa.Addr[i] = pp.Addr[i] + } + return sa, nil + } + return nil, syscall.EAFNOSUPPORT +} + +func Socket(domain, typ, proto int) (fd Handle, err error) { + if domain == AF_INET6 && SocketDisableIPv6 { + return InvalidHandle, syscall.EAFNOSUPPORT + } + return socket(int32(domain), int32(typ), int32(proto)) +} + +func SetsockoptInt(fd Handle, level, opt int, value int) (err error) { + v := int32(value) + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&v)), int32(unsafe.Sizeof(v))) +} + +func Bind(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return bind(fd, ptr, n) +} + +func Connect(fd Handle, sa Sockaddr) (err error) { + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connect(fd, ptr, n) +} + +func Getsockname(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := int32(unsafe.Sizeof(rsa)) + if err = getsockname(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Getpeername(fd Handle) (sa Sockaddr, err error) { + var rsa RawSockaddrAny + l := 
int32(unsafe.Sizeof(rsa)) + if err = getpeername(fd, &rsa, &l); err != nil { + return + } + return rsa.Sockaddr() +} + +func Listen(s Handle, n int) (err error) { + return listen(s, int32(n)) +} + +func Shutdown(fd Handle, how int) (err error) { + return shutdown(fd, int32(how)) +} + +func WSASendto(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to Sockaddr, overlapped *Overlapped, croutine *byte) (err error) { + rsa, l, err := to.sockaddr() + if err != nil { + return err + } + return WSASendTo(s, bufs, bufcnt, sent, flags, (*RawSockaddrAny)(unsafe.Pointer(rsa)), l, overlapped, croutine) +} + +func LoadGetAddrInfo() error { + return procGetAddrInfoW.Find() +} + +var connectExFunc struct { + once sync.Once + addr uintptr + err error +} + +func LoadConnectEx() error { + connectExFunc.once.Do(func() { + var s Handle + s, connectExFunc.err = Socket(AF_INET, SOCK_STREAM, IPPROTO_TCP) + if connectExFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + connectExFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_CONNECTEX)), + uint32(unsafe.Sizeof(WSAID_CONNECTEX)), + (*byte)(unsafe.Pointer(&connectExFunc.addr)), + uint32(unsafe.Sizeof(connectExFunc.addr)), + &n, nil, 0) + }) + return connectExFunc.err +} + +func connectEx(s Handle, name unsafe.Pointer, namelen int32, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(connectExFunc.addr, 7, uintptr(s), uintptr(name), uintptr(namelen), uintptr(unsafe.Pointer(sendBuf)), uintptr(sendDataLen), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConnectEx(fd Handle, sa Sockaddr, sendBuf *byte, sendDataLen uint32, bytesSent *uint32, overlapped *Overlapped) error { + err := LoadConnectEx() + if err != nil { + return errorspkg.New("failed to find ConnectEx: " + err.Error()) + } + ptr, n, err := sa.sockaddr() + if err != nil { + return err + } + return connectEx(fd, ptr, n, sendBuf, sendDataLen, bytesSent, overlapped) +} + +var sendRecvMsgFunc struct { + once sync.Once + sendAddr uintptr + recvAddr uintptr + err error +} + +func loadWSASendRecvMsg() error { + sendRecvMsgFunc.once.Do(func() { + var s Handle + s, sendRecvMsgFunc.err = Socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP) + if sendRecvMsgFunc.err != nil { + return + } + defer CloseHandle(s) + var n uint32 + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSARECVMSG)), + uint32(unsafe.Sizeof(WSAID_WSARECVMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.recvAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.recvAddr)), + &n, nil, 0) + if sendRecvMsgFunc.err != nil { + return + } + sendRecvMsgFunc.err = WSAIoctl(s, + SIO_GET_EXTENSION_FUNCTION_POINTER, + (*byte)(unsafe.Pointer(&WSAID_WSASENDMSG)), + uint32(unsafe.Sizeof(WSAID_WSASENDMSG)), + (*byte)(unsafe.Pointer(&sendRecvMsgFunc.sendAddr)), + uint32(unsafe.Sizeof(sendRecvMsgFunc.sendAddr)), + &n, nil, 0) + }) + return sendRecvMsgFunc.err +} + +func WSASendMsg(fd Handle, msg *WSAMsg, flags uint32, bytesSent *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.sendAddr, 6, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(flags), uintptr(unsafe.Pointer(bytesSent)), uintptr(unsafe.Pointer(overlapped)), 
uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +func WSARecvMsg(fd Handle, msg *WSAMsg, bytesReceived *uint32, overlapped *Overlapped, croutine *byte) error { + err := loadWSASendRecvMsg() + if err != nil { + return err + } + r1, _, e1 := syscall.Syscall6(sendRecvMsgFunc.recvAddr, 5, uintptr(fd), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(bytesReceived)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return err +} + +// Invented structures to support what package os expects. +type Rusage struct { + CreationTime Filetime + ExitTime Filetime + KernelTime Filetime + UserTime Filetime +} + +type WaitStatus struct { + ExitCode uint32 +} + +func (w WaitStatus) Exited() bool { return true } + +func (w WaitStatus) ExitStatus() int { return int(w.ExitCode) } + +func (w WaitStatus) Signal() Signal { return -1 } + +func (w WaitStatus) CoreDump() bool { return false } + +func (w WaitStatus) Stopped() bool { return false } + +func (w WaitStatus) Continued() bool { return false } + +func (w WaitStatus) StopSignal() Signal { return -1 } + +func (w WaitStatus) Signaled() bool { return false } + +func (w WaitStatus) TrapCause() int { return -1 } + +// Timespec is an invented structure on Windows, but here for +// consistency with the corresponding package for other operating systems. +type Timespec struct { + Sec int64 + Nsec int64 +} + +func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } + +func NsecToTimespec(nsec int64) (ts Timespec) { + ts.Sec = nsec / 1e9 + ts.Nsec = nsec % 1e9 + return +} + +// TODO(brainman): fix all needed for net + +func Accept(fd Handle) (nfd Handle, sa Sockaddr, err error) { return 0, nil, syscall.EWINDOWS } +func Recvfrom(fd Handle, p []byte, flags int) (n int, from Sockaddr, err error) { + return 0, nil, syscall.EWINDOWS +} +func Sendto(fd Handle, p []byte, flags int, to Sockaddr) (err error) { return syscall.EWINDOWS } +func SetsockoptTimeval(fd Handle, level, opt int, tv *Timeval) (err error) { return syscall.EWINDOWS } + +// The Linger struct is wrong but we only noticed after Go 1. +// sysLinger is the real system call structure. + +// BUG(brainman): The definition of Linger is not appropriate for direct use +// with Setsockopt and Getsockopt. +// Use SetsockoptLinger instead. 
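+//
+// Editor's sketch of why (an illustration, not upstream documentation): the
+// exported Linger carries two int32 fields (8 bytes), while the Windows
+// SO_LINGER option expects the 16-bit layout mirrored by sysLinger below
+// (4 bytes). SetsockoptLinger narrows the fields before calling setsockopt,
+// so a caller would write something like:
+//
+//	l := &windows.Linger{Onoff: 1, Linger: 5} // linger up to 5 seconds on close
+//	err := windows.SetsockoptLinger(fd, windows.SOL_SOCKET, windows.SO_LINGER, l)
+//
+// rather than passing a *Linger straight to Setsockopt.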
+ +type Linger struct { + Onoff int32 + Linger int32 +} + +type sysLinger struct { + Onoff uint16 + Linger uint16 +} + +type IPMreq struct { + Multiaddr [4]byte /* in_addr */ + Interface [4]byte /* in_addr */ +} + +type IPv6Mreq struct { + Multiaddr [16]byte /* in6_addr */ + Interface uint32 +} + +func GetsockoptInt(fd Handle, level, opt int) (int, error) { return -1, syscall.EWINDOWS } + +func SetsockoptLinger(fd Handle, level, opt int, l *Linger) (err error) { + sys := sysLinger{Onoff: uint16(l.Onoff), Linger: uint16(l.Linger)} + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&sys)), int32(unsafe.Sizeof(sys))) +} + +func SetsockoptInet4Addr(fd Handle, level, opt int, value [4]byte) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(&value[0])), 4) +} +func SetsockoptIPMreq(fd Handle, level, opt int, mreq *IPMreq) (err error) { + return Setsockopt(fd, int32(level), int32(opt), (*byte)(unsafe.Pointer(mreq)), int32(unsafe.Sizeof(*mreq))) +} +func SetsockoptIPv6Mreq(fd Handle, level, opt int, mreq *IPv6Mreq) (err error) { + return syscall.EWINDOWS +} + +func Getpid() (pid int) { return int(getCurrentProcessId()) } + +func FindFirstFile(name *uint16, data *Win32finddata) (handle Handle, err error) { + // NOTE(rsc): The Win32finddata struct is wrong for the system call: + // the two paths are each one uint16 short. Use the correct struct, + // a win32finddata1, and then copy the results out. + // There is no loss of expressivity here, because the final + // uint16, if it is used, is supposed to be a NUL, and Go doesn't need that. + // For Go 1.1, we might avoid the allocation of win32finddata1 here + // by adding a final Bug [2]uint16 field to the struct and then + // adjusting the fields in the result directly. 
+ var data1 win32finddata1 + handle, err = findFirstFile1(name, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func FindNextFile(handle Handle, data *Win32finddata) (err error) { + var data1 win32finddata1 + err = findNextFile1(handle, &data1) + if err == nil { + copyFindData(data, &data1) + } + return +} + +func getProcessEntry(pid int) (*ProcessEntry32, error) { + snapshot, err := CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0) + if err != nil { + return nil, err + } + defer CloseHandle(snapshot) + var procEntry ProcessEntry32 + procEntry.Size = uint32(unsafe.Sizeof(procEntry)) + if err = Process32First(snapshot, &procEntry); err != nil { + return nil, err + } + for { + if procEntry.ProcessID == uint32(pid) { + return &procEntry, nil + } + err = Process32Next(snapshot, &procEntry) + if err != nil { + return nil, err + } + } +} + +func Getppid() (ppid int) { + pe, err := getProcessEntry(Getpid()) + if err != nil { + return -1 + } + return int(pe.ParentProcessID) +} + +// TODO(brainman): fix all needed for os +func Fchdir(fd Handle) (err error) { return syscall.EWINDOWS } +func Link(oldpath, newpath string) (err error) { return syscall.EWINDOWS } +func Symlink(path, link string) (err error) { return syscall.EWINDOWS } + +func Fchmod(fd Handle, mode uint32) (err error) { return syscall.EWINDOWS } +func Chown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Lchown(path string, uid int, gid int) (err error) { return syscall.EWINDOWS } +func Fchown(fd Handle, uid int, gid int) (err error) { return syscall.EWINDOWS } + +func Getuid() (uid int) { return -1 } +func Geteuid() (euid int) { return -1 } +func Getgid() (gid int) { return -1 } +func Getegid() (egid int) { return -1 } +func Getgroups() (gids []int, err error) { return nil, syscall.EWINDOWS } + +type Signal int + +func (s Signal) Signal() {} + +func (s Signal) String() string { + if 0 <= s && int(s) < len(signals) { + str := signals[s] + if str != "" { + return str + } + } + return "signal " + itoa(int(s)) +} + +func LoadCreateSymbolicLink() error { + return procCreateSymbolicLinkW.Find() +} + +// Readlink returns the destination of the named symbolic link. 
+func Readlink(path string, buf []byte) (n int, err error) { + fd, err := CreateFile(StringToUTF16Ptr(path), GENERIC_READ, 0, nil, OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT|FILE_FLAG_BACKUP_SEMANTICS, 0) + if err != nil { + return -1, err + } + defer CloseHandle(fd) + + rdbbuf := make([]byte, MAXIMUM_REPARSE_DATA_BUFFER_SIZE) + var bytesReturned uint32 + err = DeviceIoControl(fd, FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0], uint32(len(rdbbuf)), &bytesReturned, nil) + if err != nil { + return -1, err + } + + rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0])) + var s string + switch rdb.ReparseTag { + case IO_REPARSE_TAG_SYMLINK: + data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameOffset+data.PrintNameLength)/2]) + case IO_REPARSE_TAG_MOUNT_POINT: + data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer)) + p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0])) + s = UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameOffset+data.PrintNameLength)/2]) + default: + // the path is not a symlink or junction but another type of reparse + // point + return -1, syscall.ENOENT + } + n = copy(buf, []byte(s)) + + return n, nil +} diff --git a/vendor/golang.org/x/sys/windows/syscall_windows_test.go b/vendor/golang.org/x/sys/windows/syscall_windows_test.go new file mode 100644 index 0000000000..9c7133cc41 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/syscall_windows_test.go @@ -0,0 +1,107 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows_test + +import ( + "io/ioutil" + "os" + "path/filepath" + "syscall" + "testing" + "unsafe" + + "golang.org/x/sys/windows" +) + +func TestWin32finddata(t *testing.T) { + dir, err := ioutil.TempDir("", "go-build") + if err != nil { + t.Fatalf("failed to create temp directory: %v", err) + } + defer os.RemoveAll(dir) + + path := filepath.Join(dir, "long_name.and_extension") + f, err := os.Create(path) + if err != nil { + t.Fatalf("failed to create %v: %v", path, err) + } + f.Close() + + type X struct { + fd windows.Win32finddata + got byte + pad [10]byte // to protect ourselves + + } + var want byte = 2 // this character is unlikely to appear in the filename + x := X{got: want} + + pathp, _ := windows.UTF16PtrFromString(path) + h, err := windows.FindFirstFile(pathp, &(x.fd)) + if err != nil { + t.Fatalf("FindFirstFile failed: %v", err) + } + err = windows.FindClose(h) + if err != nil { + t.Fatalf("FindClose failed: %v", err) + } + + if x.got != want { + t.Fatalf("memory corruption: want=%d got=%d", want, x.got) + } +} + +func TestFormatMessage(t *testing.T) { + dll := windows.MustLoadDLL("pdh.dll") + + pdhOpenQuery := func(datasrc *uint16, userdata uint32, query *windows.Handle) (errno uintptr) { + r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhOpenQueryW").Addr(), 3, uintptr(unsafe.Pointer(datasrc)), uintptr(userdata), uintptr(unsafe.Pointer(query))) + return r0 + } + + pdhCloseQuery := func(query windows.Handle) (errno uintptr) { + r0, _, _ := syscall.Syscall(dll.MustFindProc("PdhCloseQuery").Addr(), 1, uintptr(query), 0, 0) + return r0 + } + + var q windows.Handle + name, err := windows.UTF16PtrFromString("no_such_source") + if err != nil { + t.Fatal(err) + } + errno := pdhOpenQuery(name, 0, &q) + if errno == 0 { + pdhCloseQuery(q) + t.Fatal("PdhOpenQuery 
succeeded, but expected to fail.") + } + + const flags uint32 = syscall.FORMAT_MESSAGE_FROM_HMODULE | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS + buf := make([]uint16, 300) + _, err = windows.FormatMessage(flags, uintptr(dll.Handle), uint32(errno), 0, buf, nil) + if err != nil { + t.Fatalf("FormatMessage for handle=%x and errno=%x failed: %v", dll.Handle, errno, err) + } +} + +func abort(funcname string, err error) { + panic(funcname + " failed: " + err.Error()) +} + +func ExampleLoadLibrary() { + h, err := windows.LoadLibrary("kernel32.dll") + if err != nil { + abort("LoadLibrary", err) + } + defer windows.FreeLibrary(h) + proc, err := windows.GetProcAddress(h, "GetVersion") + if err != nil { + abort("GetProcAddress", err) + } + r, _, _ := syscall.Syscall(uintptr(proc), 0, 0, 0, 0) + major := byte(r) + minor := uint8(r >> 8) + build := uint16(r >> 16) + print("windows version ", major, ".", minor, " (Build ", build, ")\n") +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go new file mode 100644 index 0000000000..52c2037b68 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -0,0 +1,1333 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +import "syscall" + +const ( + // Windows errors. + ERROR_FILE_NOT_FOUND syscall.Errno = 2 + ERROR_PATH_NOT_FOUND syscall.Errno = 3 + ERROR_ACCESS_DENIED syscall.Errno = 5 + ERROR_NO_MORE_FILES syscall.Errno = 18 + ERROR_HANDLE_EOF syscall.Errno = 38 + ERROR_NETNAME_DELETED syscall.Errno = 64 + ERROR_FILE_EXISTS syscall.Errno = 80 + ERROR_BROKEN_PIPE syscall.Errno = 109 + ERROR_BUFFER_OVERFLOW syscall.Errno = 111 + ERROR_INSUFFICIENT_BUFFER syscall.Errno = 122 + ERROR_MOD_NOT_FOUND syscall.Errno = 126 + ERROR_PROC_NOT_FOUND syscall.Errno = 127 + ERROR_ALREADY_EXISTS syscall.Errno = 183 + ERROR_ENVVAR_NOT_FOUND syscall.Errno = 203 + ERROR_MORE_DATA syscall.Errno = 234 + ERROR_OPERATION_ABORTED syscall.Errno = 995 + ERROR_IO_PENDING syscall.Errno = 997 + ERROR_SERVICE_SPECIFIC_ERROR syscall.Errno = 1066 + ERROR_NOT_FOUND syscall.Errno = 1168 + ERROR_PRIVILEGE_NOT_HELD syscall.Errno = 1314 + WSAEACCES syscall.Errno = 10013 + WSAEMSGSIZE syscall.Errno = 10040 + WSAECONNRESET syscall.Errno = 10054 +) + +const ( + // Invented values to support what package os expects. 
+ O_RDONLY = 0x00000 + O_WRONLY = 0x00001 + O_RDWR = 0x00002 + O_CREAT = 0x00040 + O_EXCL = 0x00080 + O_NOCTTY = 0x00100 + O_TRUNC = 0x00200 + O_NONBLOCK = 0x00800 + O_APPEND = 0x00400 + O_SYNC = 0x01000 + O_ASYNC = 0x02000 + O_CLOEXEC = 0x80000 +) + +const ( + // More invented values for signals + SIGHUP = Signal(0x1) + SIGINT = Signal(0x2) + SIGQUIT = Signal(0x3) + SIGILL = Signal(0x4) + SIGTRAP = Signal(0x5) + SIGABRT = Signal(0x6) + SIGBUS = Signal(0x7) + SIGFPE = Signal(0x8) + SIGKILL = Signal(0x9) + SIGSEGV = Signal(0xb) + SIGPIPE = Signal(0xd) + SIGALRM = Signal(0xe) + SIGTERM = Signal(0xf) +) + +var signals = [...]string{ + 1: "hangup", + 2: "interrupt", + 3: "quit", + 4: "illegal instruction", + 5: "trace/breakpoint trap", + 6: "aborted", + 7: "bus error", + 8: "floating point exception", + 9: "killed", + 10: "user defined signal 1", + 11: "segmentation fault", + 12: "user defined signal 2", + 13: "broken pipe", + 14: "alarm clock", + 15: "terminated", +} + +const ( + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 + + FILE_LIST_DIRECTORY = 0x00000001 + FILE_APPEND_DATA = 0x00000004 + FILE_WRITE_ATTRIBUTES = 0x00000100 + + FILE_SHARE_READ = 0x00000001 + FILE_SHARE_WRITE = 0x00000002 + FILE_SHARE_DELETE = 0x00000004 + FILE_ATTRIBUTE_READONLY = 0x00000001 + FILE_ATTRIBUTE_HIDDEN = 0x00000002 + FILE_ATTRIBUTE_SYSTEM = 0x00000004 + FILE_ATTRIBUTE_DIRECTORY = 0x00000010 + FILE_ATTRIBUTE_ARCHIVE = 0x00000020 + FILE_ATTRIBUTE_NORMAL = 0x00000080 + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + + INVALID_FILE_ATTRIBUTES = 0xffffffff + + CREATE_NEW = 1 + CREATE_ALWAYS = 2 + OPEN_EXISTING = 3 + OPEN_ALWAYS = 4 + TRUNCATE_EXISTING = 5 + + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + FILE_FLAG_OVERLAPPED = 0x40000000 + + HANDLE_FLAG_INHERIT = 0x00000001 + STARTF_USESTDHANDLES = 0x00000100 + STARTF_USESHOWWINDOW = 0x00000001 + DUPLICATE_CLOSE_SOURCE = 0x00000001 + DUPLICATE_SAME_ACCESS = 0x00000002 + + STD_INPUT_HANDLE = -10 & (1<<32 - 1) + STD_OUTPUT_HANDLE = -11 & (1<<32 - 1) + STD_ERROR_HANDLE = -12 & (1<<32 - 1) + + FILE_BEGIN = 0 + FILE_CURRENT = 1 + FILE_END = 2 + + LANG_ENGLISH = 0x09 + SUBLANG_ENGLISH_US = 0x01 + + FORMAT_MESSAGE_ALLOCATE_BUFFER = 256 + FORMAT_MESSAGE_IGNORE_INSERTS = 512 + FORMAT_MESSAGE_FROM_STRING = 1024 + FORMAT_MESSAGE_FROM_HMODULE = 2048 + FORMAT_MESSAGE_FROM_SYSTEM = 4096 + FORMAT_MESSAGE_ARGUMENT_ARRAY = 8192 + FORMAT_MESSAGE_MAX_WIDTH_MASK = 255 + + MAX_PATH = 260 + MAX_LONG_PATH = 32768 + + MAX_COMPUTERNAME_LENGTH = 15 + + TIME_ZONE_ID_UNKNOWN = 0 + TIME_ZONE_ID_STANDARD = 1 + + TIME_ZONE_ID_DAYLIGHT = 2 + IGNORE = 0 + INFINITE = 0xffffffff + + WAIT_TIMEOUT = 258 + WAIT_ABANDONED = 0x00000080 + WAIT_OBJECT_0 = 0x00000000 + WAIT_FAILED = 0xFFFFFFFF + + PROCESS_TERMINATE = 1 + PROCESS_QUERY_INFORMATION = 0x00000400 + SYNCHRONIZE = 0x00100000 + + FILE_MAP_COPY = 0x01 + FILE_MAP_WRITE = 0x02 + FILE_MAP_READ = 0x04 + FILE_MAP_EXECUTE = 0x20 + + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + + // Windows reserves errors >= 1<<29 for application use. + APPLICATION_ERROR = 1 << 29 +) + +const ( + // Process creation flags. 
+ CREATE_BREAKAWAY_FROM_JOB = 0x01000000 + CREATE_DEFAULT_ERROR_MODE = 0x04000000 + CREATE_NEW_CONSOLE = 0x00000010 + CREATE_NEW_PROCESS_GROUP = 0x00000200 + CREATE_NO_WINDOW = 0x08000000 + CREATE_PROTECTED_PROCESS = 0x00040000 + CREATE_PRESERVE_CODE_AUTHZ_LEVEL = 0x02000000 + CREATE_SEPARATE_WOW_VDM = 0x00000800 + CREATE_SHARED_WOW_VDM = 0x00001000 + CREATE_SUSPENDED = 0x00000004 + CREATE_UNICODE_ENVIRONMENT = 0x00000400 + DEBUG_ONLY_THIS_PROCESS = 0x00000002 + DEBUG_PROCESS = 0x00000001 + DETACHED_PROCESS = 0x00000008 + EXTENDED_STARTUPINFO_PRESENT = 0x00080000 + INHERIT_PARENT_AFFINITY = 0x00010000 +) + +const ( + // flags for CreateToolhelp32Snapshot + TH32CS_SNAPHEAPLIST = 0x01 + TH32CS_SNAPPROCESS = 0x02 + TH32CS_SNAPTHREAD = 0x04 + TH32CS_SNAPMODULE = 0x08 + TH32CS_SNAPMODULE32 = 0x10 + TH32CS_SNAPALL = TH32CS_SNAPHEAPLIST | TH32CS_SNAPMODULE | TH32CS_SNAPPROCESS | TH32CS_SNAPTHREAD + TH32CS_INHERIT = 0x80000000 +) + +const ( + // filters for ReadDirectoryChangesW + FILE_NOTIFY_CHANGE_FILE_NAME = 0x001 + FILE_NOTIFY_CHANGE_DIR_NAME = 0x002 + FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x004 + FILE_NOTIFY_CHANGE_SIZE = 0x008 + FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 + FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 + FILE_NOTIFY_CHANGE_CREATION = 0x040 + FILE_NOTIFY_CHANGE_SECURITY = 0x100 +) + +const ( + // do not reorder + FILE_ACTION_ADDED = iota + 1 + FILE_ACTION_REMOVED + FILE_ACTION_MODIFIED + FILE_ACTION_RENAMED_OLD_NAME + FILE_ACTION_RENAMED_NEW_NAME +) + +const ( + // wincrypt.h + PROV_RSA_FULL = 1 + PROV_RSA_SIG = 2 + PROV_DSS = 3 + PROV_FORTEZZA = 4 + PROV_MS_EXCHANGE = 5 + PROV_SSL = 6 + PROV_RSA_SCHANNEL = 12 + PROV_DSS_DH = 13 + PROV_EC_ECDSA_SIG = 14 + PROV_EC_ECNRA_SIG = 15 + PROV_EC_ECDSA_FULL = 16 + PROV_EC_ECNRA_FULL = 17 + PROV_DH_SCHANNEL = 18 + PROV_SPYRUS_LYNKS = 20 + PROV_RNG = 21 + PROV_INTEL_SEC = 22 + PROV_REPLACE_OWF = 23 + PROV_RSA_AES = 24 + CRYPT_VERIFYCONTEXT = 0xF0000000 + CRYPT_NEWKEYSET = 0x00000008 + CRYPT_DELETEKEYSET = 0x00000010 + CRYPT_MACHINE_KEYSET = 0x00000020 + CRYPT_SILENT = 0x00000040 + CRYPT_DEFAULT_CONTAINER_OPTIONAL = 0x00000080 + + USAGE_MATCH_TYPE_AND = 0 + USAGE_MATCH_TYPE_OR = 1 + + X509_ASN_ENCODING = 0x00000001 + PKCS_7_ASN_ENCODING = 0x00010000 + + CERT_STORE_PROV_MEMORY = 2 + + CERT_STORE_ADD_ALWAYS = 4 + + CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG = 0x00000004 + + CERT_TRUST_NO_ERROR = 0x00000000 + CERT_TRUST_IS_NOT_TIME_VALID = 0x00000001 + CERT_TRUST_IS_REVOKED = 0x00000004 + CERT_TRUST_IS_NOT_SIGNATURE_VALID = 0x00000008 + CERT_TRUST_IS_NOT_VALID_FOR_USAGE = 0x00000010 + CERT_TRUST_IS_UNTRUSTED_ROOT = 0x00000020 + CERT_TRUST_REVOCATION_STATUS_UNKNOWN = 0x00000040 + CERT_TRUST_IS_CYCLIC = 0x00000080 + CERT_TRUST_INVALID_EXTENSION = 0x00000100 + CERT_TRUST_INVALID_POLICY_CONSTRAINTS = 0x00000200 + CERT_TRUST_INVALID_BASIC_CONSTRAINTS = 0x00000400 + CERT_TRUST_INVALID_NAME_CONSTRAINTS = 0x00000800 + CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT = 0x00001000 + CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT = 0x00002000 + CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT = 0x00004000 + CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT = 0x00008000 + CERT_TRUST_IS_OFFLINE_REVOCATION = 0x01000000 + CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY = 0x02000000 + CERT_TRUST_IS_EXPLICIT_DISTRUST = 0x04000000 + CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT = 0x08000000 + + CERT_CHAIN_POLICY_BASE = 1 + CERT_CHAIN_POLICY_AUTHENTICODE = 2 + CERT_CHAIN_POLICY_AUTHENTICODE_TS = 3 + CERT_CHAIN_POLICY_SSL = 4 + CERT_CHAIN_POLICY_BASIC_CONSTRAINTS = 5 + CERT_CHAIN_POLICY_NT_AUTH = 6 + 
CERT_CHAIN_POLICY_MICROSOFT_ROOT = 7 + CERT_CHAIN_POLICY_EV = 8 + + CERT_E_EXPIRED = 0x800B0101 + CERT_E_ROLE = 0x800B0103 + CERT_E_PURPOSE = 0x800B0106 + CERT_E_UNTRUSTEDROOT = 0x800B0109 + CERT_E_CN_NO_MATCH = 0x800B010F + + AUTHTYPE_CLIENT = 1 + AUTHTYPE_SERVER = 2 +) + +var ( + OID_PKIX_KP_SERVER_AUTH = []byte("1.3.6.1.5.5.7.3.1\x00") + OID_SERVER_GATED_CRYPTO = []byte("1.3.6.1.4.1.311.10.3.3\x00") + OID_SGC_NETSCAPE = []byte("2.16.840.1.113730.4.1\x00") +) + +// Invented values to support what package os expects. +type Timeval struct { + Sec int32 + Usec int32 +} + +func (tv *Timeval) Nanoseconds() int64 { + return (int64(tv.Sec)*1e6 + int64(tv.Usec)) * 1e3 +} + +func NsecToTimeval(nsec int64) (tv Timeval) { + tv.Sec = int32(nsec / 1e9) + tv.Usec = int32(nsec % 1e9 / 1e3) + return +} + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor uintptr + InheritHandle uint32 +} + +type Overlapped struct { + Internal uintptr + InternalHigh uintptr + Offset uint32 + OffsetHigh uint32 + HEvent Handle +} + +type FileNotifyInformation struct { + NextEntryOffset uint32 + Action uint32 + FileNameLength uint32 + FileName uint16 +} + +type Filetime struct { + LowDateTime uint32 + HighDateTime uint32 +} + +// Nanoseconds returns Filetime ft in nanoseconds +// since Epoch (00:00:00 UTC, January 1, 1970). +func (ft *Filetime) Nanoseconds() int64 { + // 100-nanosecond intervals since January 1, 1601 + nsec := int64(ft.HighDateTime)<<32 + int64(ft.LowDateTime) + // change starting time to the Epoch (00:00:00 UTC, January 1, 1970) + nsec -= 116444736000000000 + // convert into nanoseconds + nsec *= 100 + return nsec +} + +func NsecToFiletime(nsec int64) (ft Filetime) { + // convert into 100-nanosecond + nsec /= 100 + // change starting time to January 1, 1601 + nsec += 116444736000000000 + // split into high / low + ft.LowDateTime = uint32(nsec & 0xffffffff) + ft.HighDateTime = uint32(nsec >> 32 & 0xffffffff) + return ft +} + +type Win32finddata struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH - 1]uint16 + AlternateFileName [13]uint16 +} + +// This is the actual system call structure. +// Win32finddata is what we committed to in Go 1. +type win32finddata1 struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 + Reserved0 uint32 + Reserved1 uint32 + FileName [MAX_PATH]uint16 + AlternateFileName [14]uint16 +} + +func copyFindData(dst *Win32finddata, src *win32finddata1) { + dst.FileAttributes = src.FileAttributes + dst.CreationTime = src.CreationTime + dst.LastAccessTime = src.LastAccessTime + dst.LastWriteTime = src.LastWriteTime + dst.FileSizeHigh = src.FileSizeHigh + dst.FileSizeLow = src.FileSizeLow + dst.Reserved0 = src.Reserved0 + dst.Reserved1 = src.Reserved1 + + // The src is 1 element bigger than dst, but it must be NUL. 
+ copy(dst.FileName[:], src.FileName[:]) + copy(dst.AlternateFileName[:], src.AlternateFileName[:]) +} + +type ByHandleFileInformation struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + VolumeSerialNumber uint32 + FileSizeHigh uint32 + FileSizeLow uint32 + NumberOfLinks uint32 + FileIndexHigh uint32 + FileIndexLow uint32 +} + +const ( + GetFileExInfoStandard = 0 + GetFileExMaxInfoLevel = 1 +) + +type Win32FileAttributeData struct { + FileAttributes uint32 + CreationTime Filetime + LastAccessTime Filetime + LastWriteTime Filetime + FileSizeHigh uint32 + FileSizeLow uint32 +} + +// ShowWindow constants +const ( + // winuser.h + SW_HIDE = 0 + SW_NORMAL = 1 + SW_SHOWNORMAL = 1 + SW_SHOWMINIMIZED = 2 + SW_SHOWMAXIMIZED = 3 + SW_MAXIMIZE = 3 + SW_SHOWNOACTIVATE = 4 + SW_SHOW = 5 + SW_MINIMIZE = 6 + SW_SHOWMINNOACTIVE = 7 + SW_SHOWNA = 8 + SW_RESTORE = 9 + SW_SHOWDEFAULT = 10 + SW_FORCEMINIMIZE = 11 +) + +type StartupInfo struct { + Cb uint32 + _ *uint16 + Desktop *uint16 + Title *uint16 + X uint32 + Y uint32 + XSize uint32 + YSize uint32 + XCountChars uint32 + YCountChars uint32 + FillAttribute uint32 + Flags uint32 + ShowWindow uint16 + _ uint16 + _ *byte + StdInput Handle + StdOutput Handle + StdErr Handle +} + +type ProcessInformation struct { + Process Handle + Thread Handle + ProcessId uint32 + ThreadId uint32 +} + +type ProcessEntry32 struct { + Size uint32 + Usage uint32 + ProcessID uint32 + DefaultHeapID uintptr + ModuleID uint32 + Threads uint32 + ParentProcessID uint32 + PriClassBase int32 + Flags uint32 + ExeFile [MAX_PATH]uint16 +} + +type Systemtime struct { + Year uint16 + Month uint16 + DayOfWeek uint16 + Day uint16 + Hour uint16 + Minute uint16 + Second uint16 + Milliseconds uint16 +} + +type Timezoneinformation struct { + Bias int32 + StandardName [32]uint16 + StandardDate Systemtime + StandardBias int32 + DaylightName [32]uint16 + DaylightDate Systemtime + DaylightBias int32 +} + +// Socket related. + +const ( + AF_UNSPEC = 0 + AF_UNIX = 1 + AF_INET = 2 + AF_INET6 = 23 + AF_NETBIOS = 17 + + SOCK_STREAM = 1 + SOCK_DGRAM = 2 + SOCK_RAW = 3 + SOCK_SEQPACKET = 5 + + IPPROTO_IP = 0 + IPPROTO_IPV6 = 0x29 + IPPROTO_TCP = 6 + IPPROTO_UDP = 17 + + SOL_SOCKET = 0xffff + SO_REUSEADDR = 4 + SO_KEEPALIVE = 8 + SO_DONTROUTE = 16 + SO_BROADCAST = 32 + SO_LINGER = 128 + SO_RCVBUF = 0x1002 + SO_SNDBUF = 0x1001 + SO_UPDATE_ACCEPT_CONTEXT = 0x700b + SO_UPDATE_CONNECT_CONTEXT = 0x7010 + + IOC_OUT = 0x40000000 + IOC_IN = 0x80000000 + IOC_VENDOR = 0x18000000 + IOC_INOUT = IOC_IN | IOC_OUT + IOC_WS2 = 0x08000000 + SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 + SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 + SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + + // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 + + IP_TOS = 0x3 + IP_TTL = 0x4 + IP_MULTICAST_IF = 0x9 + IP_MULTICAST_TTL = 0xa + IP_MULTICAST_LOOP = 0xb + IP_ADD_MEMBERSHIP = 0xc + IP_DROP_MEMBERSHIP = 0xd + + IPV6_V6ONLY = 0x1b + IPV6_UNICAST_HOPS = 0x4 + IPV6_MULTICAST_IF = 0x9 + IPV6_MULTICAST_HOPS = 0xa + IPV6_MULTICAST_LOOP = 0xb + IPV6_JOIN_GROUP = 0xc + IPV6_LEAVE_GROUP = 0xd + + MSG_OOB = 0x1 + MSG_PEEK = 0x2 + MSG_DONTROUTE = 0x4 + MSG_WAITALL = 0x8 + + MSG_TRUNC = 0x0100 + MSG_CTRUNC = 0x0200 + MSG_BCAST = 0x0400 + MSG_MCAST = 0x0800 + + SOMAXCONN = 0x7fffffff + + TCP_NODELAY = 1 + + SHUT_RD = 0 + SHUT_WR = 1 + SHUT_RDWR = 2 + + WSADESCRIPTION_LEN = 256 + WSASYS_STATUS_LEN = 128 +) + +type WSABuf struct { + Len uint32 + Buf *byte +} + +type WSAMsg struct { + Name *syscall.RawSockaddrAny + Namelen int32 + Buffers *WSABuf + BufferCount uint32 + Control WSABuf + Flags uint32 +} + +// Invented values to support what package os expects. +const ( + S_IFMT = 0x1f000 + S_IFIFO = 0x1000 + S_IFCHR = 0x2000 + S_IFDIR = 0x4000 + S_IFBLK = 0x6000 + S_IFREG = 0x8000 + S_IFLNK = 0xa000 + S_IFSOCK = 0xc000 + S_ISUID = 0x800 + S_ISGID = 0x400 + S_ISVTX = 0x200 + S_IRUSR = 0x100 + S_IWRITE = 0x80 + S_IWUSR = 0x80 + S_IXUSR = 0x40 +) + +const ( + FILE_TYPE_CHAR = 0x0002 + FILE_TYPE_DISK = 0x0001 + FILE_TYPE_PIPE = 0x0003 + FILE_TYPE_REMOTE = 0x8000 + FILE_TYPE_UNKNOWN = 0x0000 +) + +type Hostent struct { + Name *byte + Aliases **byte + AddrType uint16 + Length uint16 + AddrList **byte +} + +type Protoent struct { + Name *byte + Aliases **byte + Proto uint16 +} + +const ( + DNS_TYPE_A = 0x0001 + DNS_TYPE_NS = 0x0002 + DNS_TYPE_MD = 0x0003 + DNS_TYPE_MF = 0x0004 + DNS_TYPE_CNAME = 0x0005 + DNS_TYPE_SOA = 0x0006 + DNS_TYPE_MB = 0x0007 + DNS_TYPE_MG = 0x0008 + DNS_TYPE_MR = 0x0009 + DNS_TYPE_NULL = 0x000a + DNS_TYPE_WKS = 0x000b + DNS_TYPE_PTR = 0x000c + DNS_TYPE_HINFO = 0x000d + DNS_TYPE_MINFO = 0x000e + DNS_TYPE_MX = 0x000f + DNS_TYPE_TEXT = 0x0010 + DNS_TYPE_RP = 0x0011 + DNS_TYPE_AFSDB = 0x0012 + DNS_TYPE_X25 = 0x0013 + DNS_TYPE_ISDN = 0x0014 + DNS_TYPE_RT = 0x0015 + DNS_TYPE_NSAP = 0x0016 + DNS_TYPE_NSAPPTR = 0x0017 + DNS_TYPE_SIG = 0x0018 + DNS_TYPE_KEY = 0x0019 + DNS_TYPE_PX = 0x001a + DNS_TYPE_GPOS = 0x001b + DNS_TYPE_AAAA = 0x001c + DNS_TYPE_LOC = 0x001d + DNS_TYPE_NXT = 0x001e + DNS_TYPE_EID = 0x001f + DNS_TYPE_NIMLOC = 0x0020 + DNS_TYPE_SRV = 0x0021 + DNS_TYPE_ATMA = 0x0022 + DNS_TYPE_NAPTR = 0x0023 + DNS_TYPE_KX = 0x0024 + DNS_TYPE_CERT = 0x0025 + DNS_TYPE_A6 = 0x0026 + DNS_TYPE_DNAME = 0x0027 + DNS_TYPE_SINK = 0x0028 + DNS_TYPE_OPT = 0x0029 + DNS_TYPE_DS = 0x002B + DNS_TYPE_RRSIG = 0x002E + DNS_TYPE_NSEC = 0x002F + DNS_TYPE_DNSKEY = 0x0030 + DNS_TYPE_DHCID = 0x0031 + DNS_TYPE_UINFO = 0x0064 + DNS_TYPE_UID = 0x0065 + DNS_TYPE_GID = 0x0066 + DNS_TYPE_UNSPEC = 0x0067 + DNS_TYPE_ADDRS = 0x00f8 + DNS_TYPE_TKEY = 0x00f9 + DNS_TYPE_TSIG = 0x00fa + DNS_TYPE_IXFR = 0x00fb + DNS_TYPE_AXFR = 0x00fc + DNS_TYPE_MAILB = 0x00fd + DNS_TYPE_MAILA = 0x00fe + DNS_TYPE_ALL = 0x00ff + DNS_TYPE_ANY = 0x00ff + DNS_TYPE_WINS = 0xff01 + DNS_TYPE_WINSR = 0xff02 + DNS_TYPE_NBSTAT = 0xff01 +) + +const ( + DNS_INFO_NO_RECORDS = 0x251D +) + +const ( + // flags inside DNSRecord.Dw + DnsSectionQuestion = 0x0000 + DnsSectionAnswer = 0x0001 + DnsSectionAuthority = 0x0002 + DnsSectionAdditional = 0x0003 +) + +type DNSSRVData struct { + Target *uint16 + Priority uint16 + Weight uint16 + Port uint16 + Pad uint16 +} + +type DNSPTRData struct { + Host *uint16 +} + +type DNSMXData struct { + NameExchange *uint16 
+ Preference uint16 + Pad uint16 +} + +type DNSTXTData struct { + StringCount uint16 + StringArray [1]*uint16 +} + +type DNSRecord struct { + Next *DNSRecord + Name *uint16 + Type uint16 + Length uint16 + Dw uint32 + Ttl uint32 + Reserved uint32 + Data [40]byte +} + +const ( + TF_DISCONNECT = 1 + TF_REUSE_SOCKET = 2 + TF_WRITE_BEHIND = 4 + TF_USE_DEFAULT_WORKER = 0 + TF_USE_SYSTEM_THREAD = 16 + TF_USE_KERNEL_APC = 32 +) + +type TransmitFileBuffers struct { + Head uintptr + HeadLength uint32 + Tail uintptr + TailLength uint32 +} + +const ( + IFF_UP = 1 + IFF_BROADCAST = 2 + IFF_LOOPBACK = 4 + IFF_POINTTOPOINT = 8 + IFF_MULTICAST = 16 +) + +const SIO_GET_INTERFACE_LIST = 0x4004747F + +// TODO(mattn): SockaddrGen is union of sockaddr/sockaddr_in/sockaddr_in6_old. +// will be fixed to change variable type as suitable. + +type SockaddrGen [24]byte + +type InterfaceInfo struct { + Flags uint32 + Address SockaddrGen + BroadcastAddress SockaddrGen + Netmask SockaddrGen +} + +type IpAddressString struct { + String [16]byte +} + +type IpMaskString IpAddressString + +type IpAddrString struct { + Next *IpAddrString + IpAddress IpAddressString + IpMask IpMaskString + Context uint32 +} + +const MAX_ADAPTER_NAME_LENGTH = 256 +const MAX_ADAPTER_DESCRIPTION_LENGTH = 128 +const MAX_ADAPTER_ADDRESS_LENGTH = 8 + +type IpAdapterInfo struct { + Next *IpAdapterInfo + ComboIndex uint32 + AdapterName [MAX_ADAPTER_NAME_LENGTH + 4]byte + Description [MAX_ADAPTER_DESCRIPTION_LENGTH + 4]byte + AddressLength uint32 + Address [MAX_ADAPTER_ADDRESS_LENGTH]byte + Index uint32 + Type uint32 + DhcpEnabled uint32 + CurrentIpAddress *IpAddrString + IpAddressList IpAddrString + GatewayList IpAddrString + DhcpServer IpAddrString + HaveWins bool + PrimaryWinsServer IpAddrString + SecondaryWinsServer IpAddrString + LeaseObtained int64 + LeaseExpires int64 +} + +const MAXLEN_PHYSADDR = 8 +const MAX_INTERFACE_NAME_LEN = 256 +const MAXLEN_IFDESCR = 256 + +type MibIfRow struct { + Name [MAX_INTERFACE_NAME_LEN]uint16 + Index uint32 + Type uint32 + Mtu uint32 + Speed uint32 + PhysAddrLen uint32 + PhysAddr [MAXLEN_PHYSADDR]byte + AdminStatus uint32 + OperStatus uint32 + LastChange uint32 + InOctets uint32 + InUcastPkts uint32 + InNUcastPkts uint32 + InDiscards uint32 + InErrors uint32 + InUnknownProtos uint32 + OutOctets uint32 + OutUcastPkts uint32 + OutNUcastPkts uint32 + OutDiscards uint32 + OutErrors uint32 + OutQLen uint32 + DescrLen uint32 + Descr [MAXLEN_IFDESCR]byte +} + +type CertContext struct { + EncodingType uint32 + EncodedCert *byte + Length uint32 + CertInfo uintptr + Store Handle +} + +type CertChainContext struct { + Size uint32 + TrustStatus CertTrustStatus + ChainCount uint32 + Chains **CertSimpleChain + LowerQualityChainCount uint32 + LowerQualityChains **CertChainContext + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertSimpleChain struct { + Size uint32 + TrustStatus CertTrustStatus + NumElements uint32 + Elements **CertChainElement + TrustListInfo uintptr + HasRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 +} + +type CertChainElement struct { + Size uint32 + CertContext *CertContext + TrustStatus CertTrustStatus + RevocationInfo *CertRevocationInfo + IssuanceUsage *CertEnhKeyUsage + ApplicationUsage *CertEnhKeyUsage + ExtendedErrorInfo *uint16 +} + +type CertRevocationInfo struct { + Size uint32 + RevocationResult uint32 + RevocationOid *byte + OidSpecificInfo uintptr + HasFreshnessTime uint32 + FreshnessTime uint32 + CrlInfo uintptr // *CertRevocationCrlInfo +} + 
+type CertTrustStatus struct { + ErrorStatus uint32 + InfoStatus uint32 +} + +type CertUsageMatch struct { + Type uint32 + Usage CertEnhKeyUsage +} + +type CertEnhKeyUsage struct { + Length uint32 + UsageIdentifiers **byte +} + +type CertChainPara struct { + Size uint32 + RequestedUsage CertUsageMatch + RequstedIssuancePolicy CertUsageMatch + URLRetrievalTimeout uint32 + CheckRevocationFreshnessTime uint32 + RevocationFreshnessTime uint32 + CacheResync *Filetime +} + +type CertChainPolicyPara struct { + Size uint32 + Flags uint32 + ExtraPolicyPara uintptr +} + +type SSLExtraCertChainPolicyPara struct { + Size uint32 + AuthType uint32 + Checks uint32 + ServerName *uint16 +} + +type CertChainPolicyStatus struct { + Size uint32 + Error uint32 + ChainIndex uint32 + ElementIndex uint32 + ExtraPolicyStatus uintptr +} + +const ( + // do not reorder + HKEY_CLASSES_ROOT = 0x80000000 + iota + HKEY_CURRENT_USER + HKEY_LOCAL_MACHINE + HKEY_USERS + HKEY_PERFORMANCE_DATA + HKEY_CURRENT_CONFIG + HKEY_DYN_DATA + + KEY_QUERY_VALUE = 1 + KEY_SET_VALUE = 2 + KEY_CREATE_SUB_KEY = 4 + KEY_ENUMERATE_SUB_KEYS = 8 + KEY_NOTIFY = 16 + KEY_CREATE_LINK = 32 + KEY_WRITE = 0x20006 + KEY_EXECUTE = 0x20019 + KEY_READ = 0x20019 + KEY_WOW64_64KEY = 0x0100 + KEY_WOW64_32KEY = 0x0200 + KEY_ALL_ACCESS = 0xf003f +) + +const ( + // do not reorder + REG_NONE = iota + REG_SZ + REG_EXPAND_SZ + REG_BINARY + REG_DWORD_LITTLE_ENDIAN + REG_DWORD_BIG_ENDIAN + REG_LINK + REG_MULTI_SZ + REG_RESOURCE_LIST + REG_FULL_RESOURCE_DESCRIPTOR + REG_RESOURCE_REQUIREMENTS_LIST + REG_QWORD_LITTLE_ENDIAN + REG_DWORD = REG_DWORD_LITTLE_ENDIAN + REG_QWORD = REG_QWORD_LITTLE_ENDIAN +) + +type AddrinfoW struct { + Flags int32 + Family int32 + Socktype int32 + Protocol int32 + Addrlen uintptr + Canonname *uint16 + Addr uintptr + Next *AddrinfoW +} + +const ( + AI_PASSIVE = 1 + AI_CANONNAME = 2 + AI_NUMERICHOST = 4 +) + +type GUID struct { + Data1 uint32 + Data2 uint16 + Data3 uint16 + Data4 [8]byte +} + +var WSAID_CONNECTEX = GUID{ + 0x25a207b9, + 0xddf3, + 0x4660, + [8]byte{0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e}, +} + +var WSAID_WSASENDMSG = GUID{ + 0xa441e712, + 0x754f, + 0x43ca, + [8]byte{0x84, 0xa7, 0x0d, 0xee, 0x44, 0xcf, 0x60, 0x6d}, +} + +var WSAID_WSARECVMSG = GUID{ + 0xf689d7c8, + 0x6f1f, + 0x436b, + [8]byte{0x8a, 0x53, 0xe5, 0x4f, 0xe3, 0x51, 0xc3, 0x22}, +} + +const ( + FILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + FILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +const ( + WSAPROTOCOL_LEN = 255 + MAX_PROTOCOL_CHAIN = 7 + BASE_PROTOCOL = 1 + LAYERED_PROTOCOL = 0 + + XP1_CONNECTIONLESS = 0x00000001 + XP1_GUARANTEED_DELIVERY = 0x00000002 + XP1_GUARANTEED_ORDER = 0x00000004 + XP1_MESSAGE_ORIENTED = 0x00000008 + XP1_PSEUDO_STREAM = 0x00000010 + XP1_GRACEFUL_CLOSE = 0x00000020 + XP1_EXPEDITED_DATA = 0x00000040 + XP1_CONNECT_DATA = 0x00000080 + XP1_DISCONNECT_DATA = 0x00000100 + XP1_SUPPORT_BROADCAST = 0x00000200 + XP1_SUPPORT_MULTIPOINT = 0x00000400 + XP1_MULTIPOINT_CONTROL_PLANE = 0x00000800 + XP1_MULTIPOINT_DATA_PLANE = 0x00001000 + XP1_QOS_SUPPORTED = 0x00002000 + XP1_UNI_SEND = 0x00008000 + XP1_UNI_RECV = 0x00010000 + XP1_IFS_HANDLES = 0x00020000 + XP1_PARTIAL_MESSAGE = 0x00040000 + XP1_SAN_SUPPORT_SDP = 0x00080000 + + PFL_MULTIPLE_PROTO_ENTRIES = 0x00000001 + PFL_RECOMMENDED_PROTO_ENTRY = 0x00000002 + PFL_HIDDEN = 0x00000004 + PFL_MATCHES_PROTOCOL_ZERO = 0x00000008 + PFL_NETWORKDIRECT_PROVIDER = 0x00000010 +) + +type WSAProtocolInfo struct { + ServiceFlags1 uint32 + ServiceFlags2 uint32 + ServiceFlags3 uint32 + ServiceFlags4 uint32 + 
ProviderFlags uint32 + ProviderId GUID + CatalogEntryId uint32 + ProtocolChain WSAProtocolChain + Version int32 + AddressFamily int32 + MaxSockAddr int32 + MinSockAddr int32 + SocketType int32 + Protocol int32 + ProtocolMaxOffset int32 + NetworkByteOrder int32 + SecurityScheme int32 + MessageSize uint32 + ProviderReserved uint32 + ProtocolName [WSAPROTOCOL_LEN + 1]uint16 +} + +type WSAProtocolChain struct { + ChainLen int32 + ChainEntries [MAX_PROTOCOL_CHAIN]uint32 +} + +type TCPKeepalive struct { + OnOff uint32 + Time uint32 + Interval uint32 +} + +type symbolicLinkReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + Flags uint32 + PathBuffer [1]uint16 +} + +type mountPointReparseBuffer struct { + SubstituteNameOffset uint16 + SubstituteNameLength uint16 + PrintNameOffset uint16 + PrintNameLength uint16 + PathBuffer [1]uint16 +} + +type reparseDataBuffer struct { + ReparseTag uint32 + ReparseDataLength uint16 + Reserved uint16 + + // GenericReparseBuffer + reparseBuffer byte +} + +const ( + FSCTL_GET_REPARSE_POINT = 0x900A8 + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_SYMLINK = 0xA000000C + SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1 +) + +const ( + ComputerNameNetBIOS = 0 + ComputerNameDnsHostname = 1 + ComputerNameDnsDomain = 2 + ComputerNameDnsFullyQualified = 3 + ComputerNamePhysicalNetBIOS = 4 + ComputerNamePhysicalDnsHostname = 5 + ComputerNamePhysicalDnsDomain = 6 + ComputerNamePhysicalDnsFullyQualified = 7 + ComputerNameMax = 8 +) + +const ( + MOVEFILE_REPLACE_EXISTING = 0x1 + MOVEFILE_COPY_ALLOWED = 0x2 + MOVEFILE_DELAY_UNTIL_REBOOT = 0x4 + MOVEFILE_WRITE_THROUGH = 0x8 + MOVEFILE_CREATE_HARDLINK = 0x10 + MOVEFILE_FAIL_IF_NOT_TRACKABLE = 0x20 +) + +const GAA_FLAG_INCLUDE_PREFIX = 0x00000010 + +const ( + IF_TYPE_OTHER = 1 + IF_TYPE_ETHERNET_CSMACD = 6 + IF_TYPE_ISO88025_TOKENRING = 9 + IF_TYPE_PPP = 23 + IF_TYPE_SOFTWARE_LOOPBACK = 24 + IF_TYPE_ATM = 37 + IF_TYPE_IEEE80211 = 71 + IF_TYPE_TUNNEL = 131 + IF_TYPE_IEEE1394 = 144 +) + +type SocketAddress struct { + Sockaddr *syscall.RawSockaddrAny + SockaddrLength int32 +} + +type IpAdapterUnicastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterUnicastAddress + Address SocketAddress + PrefixOrigin int32 + SuffixOrigin int32 + DadState int32 + ValidLifetime uint32 + PreferredLifetime uint32 + LeaseLifetime uint32 + OnLinkPrefixLength uint8 +} + +type IpAdapterAnycastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterAnycastAddress + Address SocketAddress +} + +type IpAdapterMulticastAddress struct { + Length uint32 + Flags uint32 + Next *IpAdapterMulticastAddress + Address SocketAddress +} + +type IpAdapterDnsServerAdapter struct { + Length uint32 + Reserved uint32 + Next *IpAdapterDnsServerAdapter + Address SocketAddress +} + +type IpAdapterPrefix struct { + Length uint32 + Flags uint32 + Next *IpAdapterPrefix + Address SocketAddress + PrefixLength uint32 +} + +type IpAdapterAddresses struct { + Length uint32 + IfIndex uint32 + Next *IpAdapterAddresses + AdapterName *byte + FirstUnicastAddress *IpAdapterUnicastAddress + FirstAnycastAddress *IpAdapterAnycastAddress + FirstMulticastAddress *IpAdapterMulticastAddress + FirstDnsServerAddress *IpAdapterDnsServerAdapter + DnsSuffix *uint16 + Description *uint16 + FriendlyName *uint16 + PhysicalAddress [syscall.MAX_ADAPTER_ADDRESS_LENGTH]byte + PhysicalAddressLength uint32 + Flags uint32 + Mtu uint32 + IfType uint32 + OperStatus 
uint32 + Ipv6IfIndex uint32 + ZoneIndices [16]uint32 + FirstPrefix *IpAdapterPrefix + /* more fields might be present here. */ +} + +const ( + IfOperStatusUp = 1 + IfOperStatusDown = 2 + IfOperStatusTesting = 3 + IfOperStatusUnknown = 4 + IfOperStatusDormant = 5 + IfOperStatusNotPresent = 6 + IfOperStatusLowerLayerDown = 7 +) + +// Console related constants used for the mode parameter to SetConsoleMode. See +// https://docs.microsoft.com/en-us/windows/console/setconsolemode for details. + +const ( + ENABLE_PROCESSED_INPUT = 0x1 + ENABLE_LINE_INPUT = 0x2 + ENABLE_ECHO_INPUT = 0x4 + ENABLE_WINDOW_INPUT = 0x8 + ENABLE_MOUSE_INPUT = 0x10 + ENABLE_INSERT_MODE = 0x20 + ENABLE_QUICK_EDIT_MODE = 0x40 + ENABLE_EXTENDED_FLAGS = 0x80 + ENABLE_AUTO_POSITION = 0x100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x200 + + ENABLE_PROCESSED_OUTPUT = 0x1 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x2 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x4 + DISABLE_NEWLINE_AUTO_RETURN = 0x8 + ENABLE_LVB_GRID_WORLDWIDE = 0x10 +) + +type Coord struct { + X int16 + Y int16 +} + +type SmallRect struct { + Left int16 + Top int16 + Right int16 + Bottom int16 +} + +// Used with GetConsoleScreenBufferInfo to retrieve information about a console +// screen buffer. See +// https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str +// for details. + +type ConsoleScreenBufferInfo struct { + Size Coord + CursorPosition Coord + Attributes uint16 + Window SmallRect + MaximumWindowSize Coord +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go new file mode 100644 index 0000000000..fe0ddd0316 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_386.go @@ -0,0 +1,22 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte +} + +type Servent struct { + Name *byte + Aliases **byte + Port uint16 + Proto *byte +} diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go new file mode 100644 index 0000000000..7e154c2df2 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package windows + +type WSAData struct { + Version uint16 + HighVersion uint16 + MaxSockets uint16 + MaxUdpDg uint16 + VendorInfo *byte + Description [WSADESCRIPTION_LEN + 1]byte + SystemStatus [WSASYS_STATUS_LEN + 1]byte +} + +type Servent struct { + Name *byte + Aliases **byte + Proto *byte + Port uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go new file mode 100644 index 0000000000..6ad2c85bd9 --- /dev/null +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -0,0 +1,2687 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package windows + +import ( + "syscall" + "unsafe" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. 
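+// (Editor's note, illustrating the point: callers compare against the shared
+// value rather than allocating a fresh one, e.g.
+//
+//	err := errnoErr(syscall.Errno(997)) // returns the preallocated errERROR_IO_PENDING
+//	same := err == errERROR_IO_PENDING  // true, with no interface allocation per call
+//
+// any Errno not in the table falls through and is boxed on conversion.)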
+const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values seen on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = NewLazySystemDLL("advapi32.dll") + modkernel32 = NewLazySystemDLL("kernel32.dll") + modshell32 = NewLazySystemDLL("shell32.dll") + modmswsock = NewLazySystemDLL("mswsock.dll") + modcrypt32 = NewLazySystemDLL("crypt32.dll") + modws2_32 = NewLazySystemDLL("ws2_32.dll") + moddnsapi = NewLazySystemDLL("dnsapi.dll") + modiphlpapi = NewLazySystemDLL("iphlpapi.dll") + modsecur32 = NewLazySystemDLL("secur32.dll") + modnetapi32 = NewLazySystemDLL("netapi32.dll") + moduserenv = NewLazySystemDLL("userenv.dll") + + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procCreateServiceW = modadvapi32.NewProc("CreateServiceW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procDeleteService = modadvapi32.NewProc("DeleteService") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procControlService = modadvapi32.NewProc("ControlService") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procGetLastError = modkernel32.NewProc("GetLastError") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetVersion = modkernel32.NewProc("GetVersion") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procReadFile = modkernel32.NewProc("ReadFile") + procWriteFile = modkernel32.NewProc("WriteFile") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindClose = modkernel32.NewProc("FindClose") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procCreateDirectoryW =
modkernel32.NewProc("CreateDirectoryW") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procGetFileType = modkernel32.NewProc("GetFileType") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = 
modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procGetDriveType = modkernel32.NewProc("GetDriveType") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = 
modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procsocket = modws2_32.NewProc("socket") + procsetsockopt = modws2_32.NewProc("setsockopt") + procgetsockopt = modws2_32.NewProc("getsockopt") + procbind = modws2_32.NewProc("bind") + procconnect = modws2_32.NewProc("connect") + procgetsockname = modws2_32.NewProc("getsockname") + procgetpeername = modws2_32.NewProc("getpeername") + proclisten = modws2_32.NewProc("listen") + procshutdown = modws2_32.NewProc("shutdown") + procclosesocket = modws2_32.NewProc("closesocket") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSASend = modws2_32.NewProc("WSASend") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procntohs = modws2_32.NewProc("ntohs") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetACP = modkernel32.NewProc("GetACP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procCopySid = modadvapi32.NewProc("CopySid") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procEqualSid = modadvapi32.NewProc("EqualSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") +) + +func 
RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeregisterEventSource(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseServiceHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteService(service Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + 
} else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) { + r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) { + r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) { + r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) { + r1, _, e1 := 
syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLastError() (lasterr error) { + r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) + if r0 != 0 { + lasterr = syscall.Errno(r0) + } + return +} + +func LoadLibrary(libname string) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibrary(_p0) +} + +func _LoadLibrary(libname *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(libname) + if err != nil { + return + } + return _LoadLibraryEx(_p0, zero, flags) +} + +func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeLibrary(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcAddress(module Handle, procname string) (proc uintptr, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(procname) + if err != nil { + return + } + return _GetProcAddress(module, _p0) +} + +func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { + r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0) + proc = uintptr(r0) + if proc == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVersion() (ver uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) + ver = uint32(r0) + if ver == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) { + var _p0 *uint16 + if len(buf) > 0 { + _p0 = &buf[0] + } + r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ExitProcess(exitcode uint32) { + syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) + return +} + +func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile int32) (handle Handle, err error) { + r0, _, e1 := 
syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteFile(handle Handle, buf []byte, done *uint32, overlapped *Overlapped) (err error) { + var _p0 *byte + if len(buf) > 0 { + _p0 = &buf[0] + } + r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) { + r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0) + newlowoffset = uint32(r0) + if newlowoffset == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CloseHandle(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStdHandle(stdhandle uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetStdHandle(stdhandle uint32, handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func findNextFile1(handle Handle, data *win32finddata1) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindClose(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), 
uintptr(unsafe.Pointer(data)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetCurrentDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) { + r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RemoveDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteFile(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFile(from *uint16, to *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerName(buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEndOfFile(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetSystemTimeAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetSystemTimePreciseAsFileTime(time *Filetime) { + syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0) + return +} + +func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0) + rc = uint32(r0) + if rc == 0xffffffff { + if e1 != 0 { + 
err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uint32, threadcnt uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uint32, overlapped **Overlapped, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIo(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CancelIoEx(s Handle, o *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) (err error) { + var _p0 uint32 + if inheritHandles { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcess(da uint32, inheritHandle bool, pid uint32) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(da), uintptr(_p0), uintptr(pid)) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TerminateProcess(handle Handle, exitcode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0) + if r1 == 0 { + if e1 != 0 { + 
err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetStartupInfo(startupInfo *StartupInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentProcess() (pseudoHandle Handle, err error) { + r0, _, e1 := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0) + pseudoHandle = Handle(r0) + if pseudoHandle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { + var _p0 uint32 + if bInheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) { + r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0) + event = uint32(r0) + if event == 0xffffffff { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileType(filehandle Handle) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptReleaseContext(provhandle Handle, 
flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) { + r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentStrings() (envs *uint16, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0) + envs = (*uint16)(unsafe.Pointer(r0)) + if envs == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeEnvironmentStrings(envs *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) { + r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributes(name *uint16) (attrs uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + attrs = uint32(r0) + if attrs == INVALID_FILE_ATTRIBUTES { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetFileAttributes(name *uint16, attrs uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) { + r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCommandLine() (cmd *uint16) { + r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0) + cmd = (*uint16)(unsafe.Pointer(r0)) + return +} + +func CommandLineToArgv(cmd *uint16, argc *int32) (argv *[8192]*[8192]uint16, err error) { + r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0) + argv = (*[8192]*[8192]uint16)(unsafe.Pointer(r0)) 
+ if argv == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LocalFree(hmem Handle) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0) + handle = Handle(r0) + if handle != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushFileBuffers(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0) + addr = uintptr(r0) + if addr == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnmapViewOfFile(addr uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FlushViewOfFile(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualLock(addr uintptr, length uintptr) (err error) { + r1, _, 
e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualUnlock(addr uintptr, length uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) { + r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0) + value = uintptr(r0) + if value == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) { + r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + var _p0 uint32 + if watchSubTree { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) { + r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0) + store = Handle(r0) + if store == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) { 
+ r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) { + r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertCloseStore(store Handle, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) { + r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateChain(ctx *CertChainContext) { + syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + return +} + +func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) { + r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen)) + context = (*CertContext)(unsafe.Pointer(r0)) + if context == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertFreeCertificateContext(ctx *CertContext) (err error) { + r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) { + r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegCloseKey(key Handle) (regerrno error) { + r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryInfoKey(key 
Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + +func getCurrentProcessId() (pid uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0) + pid = uint32(r0) + return +} + +func GetConsoleMode(console Handle, mode *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetConsoleMode(console Handle, mode uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { + r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) { + r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0) + if r1 == 0 { + if e1 != 0 { + err = 
errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) { + r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) { + r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved)) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetCurrentThreadId() (id uint32) { + r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0) + id = uint32(r0) + return +} + +func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenEvent(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 
= 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ResetEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func PulseEvent(event Handle) (err error) { + r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeClose(findVolume Handle) (err error) { + r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { + r1, _, e1 := 
syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetDriveType(rootPathName *uint16) (driveType uint32) { + r0, _, _ := syscall.Syscall(procGetDriveType.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) + driveType = uint32(r0) + return +} + +func GetLogicalDrives() (drivesBitMask uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0) + drivesBitMask = uint32(r0) + if drivesBitMask == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, 
uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { + r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func WSACleanup() (err error) { + r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) { + r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func socket(af int32, typ int32, protocol int32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol)) + handle = Handle(r0) + if handle == InvalidHandle { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) { + r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) { + r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), 
uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) { + r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func listen(s Handle, backlog int32) (err error) { + r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func shutdown(s Handle, how int32) (err error) { + r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Closesocket(s Handle) (err error) { + r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) { + syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0) + return +} + +func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), 
uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) { + r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine))) + if r1 == socket_error { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetHostByName(name string) (h *Hostent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetHostByName(_p0) +} + +func _GetHostByName(name *byte) (h *Hostent, err error) { + r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + h = (*Hostent)(unsafe.Pointer(r0)) + if h == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetServByName(name string, proto string) (s *Servent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + var _p1 *byte + _p1, err = syscall.BytePtrFromString(proto) + if err != nil { + return + } + return _GetServByName(_p0, _p1) +} + +func _GetServByName(name *byte, proto *byte) (s *Servent, err error) { + r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0) + s = (*Servent)(unsafe.Pointer(r0)) + if s == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func Ntohs(netshort uint16) (u uint16) { + r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0) + u = uint16(r0) + return +} + +func GetProtoByName(name string) (p *Protoent, err error) { + var _p0 *byte + _p0, err = syscall.BytePtrFromString(name) + if err != nil { + return + } + return _GetProtoByName(_p0) +} + +func _GetProtoByName(name *byte) (p *Protoent, err error) { + r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0) + p = (*Protoent)(unsafe.Pointer(r0)) + if p == nil { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func DnsQuery(name string, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) { + var _p0 *uint16 + _p0, status = syscall.UTF16PtrFromString(name) + if status != nil { + return + } + return _DnsQuery(_p0, qtype, options, extra, qrs, pr) +} + +func _DnsQuery(name *uint16, qtype uint16, options uint32, 
extra *byte, qrs **DNSRecord, pr *byte) (status error) { + r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr))) + if r0 != 0 { + status = syscall.Errno(r0) + } + return +} + +func DnsRecordListFree(rl *DNSRecord, freetype uint32) { + syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0) + return +} + +func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) { + r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0) + same = r0 != 0 + return +} + +func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) { + r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0) + if r0 != 0 { + sockerr = syscall.Errno(r0) + } + return +} + +func FreeAddrInfoW(addrinfo *AddrinfoW) { + syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0) + return +} + +func GetIfEntry(pIfRow *MibIfRow) (errcode error) { + r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) { + r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) { + r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength))) + n = int32(r0) + if n == -1 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) { + r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func GetACP() (acp uint32) { + r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0) + acp = uint32(r0) + return +} + +func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) { + r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar)) + nwrite = int32(r0) + if nwrite == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, 
translatedName *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserNameEx(nameFormat uint32, nameBuffer *uint16, nSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffer)), uintptr(unsafe.Pointer(nSize))) + if r1&0xff == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) { + r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) { + r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType))) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func NetApiBufferFree(buf *byte) (neterr error) { + r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0) + if r0 != 0 { + neterr = syscall.Errno(r0) + } + return +} + +func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetLengthSid(sid *SID) (len uint32) { + r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1,
uintptr(unsafe.Pointer(sid)), 0, 0) + len = uint32(r0) + return +} + +func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) { + r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func FreeSid(sid *SID) (err error) { + r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0) + if r1 != 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) { + r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0) + isEqual = r0 != 0 + return +} + +func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) { + r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenProcessToken(h Handle, access uint32, token *Token) (err error) { + r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(h), uintptr(access), uintptr(unsafe.Pointer(token))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetTokenInformation(t Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(t), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/.travis.yml b/vendor/gopkg.in/alexcesaro/statsd.v1/.travis.yml new file mode 100644 index 0000000000..48915e737a --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/.travis.yml @@ -0,0 +1,9 @@ +language: go + +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/LICENSE b/vendor/gopkg.in/alexcesaro/statsd.v1/LICENSE new file mode 100644 index 0000000000..4ec7268d58 --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2015 Alexandre Cesaro + +Permission is hereby 
granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/README.md b/vendor/gopkg.in/alexcesaro/statsd.v1/README.md new file mode 100644 index 0000000000..00547dfcd7 --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/README.md @@ -0,0 +1,49 @@ +# statsd +[![Build Status](https://travis-ci.org/alexcesaro/statsd.svg?branch=v1)](https://travis-ci.org/alexcesaro/statsd) [![Code Coverage](http://gocover.io/_badge/gopkg.in/alexcesaro/statsd.v1)](http://gocover.io/gopkg.in/alexcesaro/statsd.v1) [![Documentation](https://godoc.org/gopkg.in/alexcesaro/statsd.v1?status.svg)](https://godoc.org/gopkg.in/alexcesaro/statsd.v1) + +## Introduction + +statsd is a simple and efficient [Statsd](https://github.com/etsy/statsd) +client. + +See the [benchmark](https://github.com/alexcesaro/statsdbench) for a comparison +with other Go StatsD clients. + +## Features + +- Supports all StatsD metrics: counter, gauge, timing and set +- Supports Datadog and InfluxDB tags +- Fast and GC-friendly: Client's methods do not allocate +- Simple API +- 100% test coverage +- Versioned API using gopkg.in + + +## Documentation + +https://godoc.org/gopkg.in/alexcesaro/statsd.v1 + + +## Download + + go get gopkg.in/alexcesaro/statsd.v1 + + +## Example + +See the [examples in the documentation](https://godoc.org/gopkg.in/alexcesaro/statsd.v1#example-package). + + +## License + +[MIT](LICENSE) + + +## Contribute + +Do you have a question the documentation does not answer? Is there a use case +that you feel is common and is not well-addressed by the current API? + +If so, you are more than welcome to ask questions in the +[thread on golang-nuts](https://groups.google.com/d/topic/golang-nuts/Tz6t4_iLgnw/discussion) +or open an issue or send a pull-request here on GitHub. diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/doc.go b/vendor/gopkg.in/alexcesaro/statsd.v1/doc.go new file mode 100644 index 0000000000..ffd6c066d3 --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/doc.go @@ -0,0 +1,17 @@ +/* +Package statsd is a simple and efficient StatsD client. + +Client's methods are fast and do not allocate memory. + +Internally, Client's methods buffer metrics. The buffer is flushed when either: + - the background goroutine flushes the buffer (every 100ms by default) + - the buffer is full (1440 bytes by default so that IP packets are not + fragmented) + +The background goroutine can be disabled using the WithFlushPeriod(0) option. + +Buffering can be disabled using the WithMaxPacketSize(0) option.
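+ +For example, a client that combines both options writes each metric to the network immediately: + + c, err := New(":8125", WithFlushPeriod(0), WithMaxPacketSize(0)) + if err != nil { + // New returned a muted client along with the error + } + c.Increment("foo.counter") // sent in its own packet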
+ +StatsD homepage: https://github.com/etsy/statsd +*/ +package statsd diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/examples_test.go b/vendor/gopkg.in/alexcesaro/statsd.v1/examples_test.go new file mode 100644 index 0000000000..1459aa65b5 --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/examples_test.go @@ -0,0 +1,82 @@ +package statsd_test + +import ( + "log" + "net/http" + "runtime" + "time" + + "github.com/alexcesaro/statsd" +) + +func Example() { + c, err := statsd.New(":8125") + if err != nil { + panic(err) + } + + c.Increment("foo.counter") + c.Gauge("num_goroutine", runtime.NumGoroutine()) + + t := c.NewTiming() + http.Get("http://example.com/") + t.Send("homepage.response_time", 1) + // Can also be used as a one-liner in a function: + // func PingHomepage() { + // defer c.NewTiming().Send("homepage.response_time", 1) + // http.Get("http://example.com/") + // } + + c.Close() +} + +func ExampleMute() { + c, err := statsd.New(":8125", statsd.Mute(true)) + if err != nil { + panic(err) + } + c.Increment("foo.bar") // Does nothing. +} + +func ExampleWithDatadogTags() { + statsd.New(":8125", statsd.WithDatadogTags("region:us", "app:my_app")) +} + +func ExampleWithErrorHandler() { + statsd.New(":8125", statsd.WithErrorHandler(func(err error) { + log.Print(err) + })) +} + +func ExampleWithFlushPeriod() { + statsd.New(":8125", statsd.WithFlushPeriod(10*time.Millisecond)) +} + +func ExampleWithInfluxDBTags() { + statsd.New(":8125", statsd.WithInfluxDBTags("region", "us", "app", "my_app")) +} + +func ExampleWithMaxPacketSize() { + statsd.New(":8125", statsd.WithMaxPacketSize(512)) +} + +func ExampleWithNetwork() { + // Send metrics using a TCP connection. + statsd.New(":8125", statsd.WithNetwork("tcp")) +} + +func ExampleWithPrefix() { + c, err := statsd.New(":8125", statsd.WithPrefix("my_app.")) + if err != nil { + panic(err) + } + c.Increment("foo.bar") // Increments "my_app.foo.bar". +} + +var c *statsd.Client + +func ExampleClient_NewTiming() { + // Send a timing metric each time the function is run. + defer c.NewTiming().Send("homepage.response_time", 1) + http.Get("http://example.com/") +} diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/statsd.go b/vendor/gopkg.in/alexcesaro/statsd.v1/statsd.go new file mode 100644 index 0000000000..0617d0e1cf --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/statsd.go @@ -0,0 +1,453 @@ +package statsd + +import ( + "bytes" + "math/rand" + "net" + "strconv" + "sync" + "time" +) + +// A Client represents a StatsD client. +type Client struct { + mu sync.Mutex + + // Fields guarded by the mutex. + conn net.Conn + buf []byte + rateCache map[float32]string + closed bool + + // Fields settable with options at Client's creation. + muted bool + errorHandler func(error) + flushPeriod time.Duration + maxPacketSize int + network string + prefix string + tagFormat tagFormat + tags string +} + +// An Option represents an option for a Client. It must be used as an argument +// to New(). +type Option func(*Client) + +// Mute sets whether the Client is muted. +func Mute(b bool) Option { + return Option(func(c *Client) { + c.muted = b + }) +} + +// WithErrorHandler sets the error handling function used by the Client. +func WithErrorHandler(h func(error)) Option { + return Option(func(c *Client) { + c.errorHandler = h + }) +} + +// WithFlushPeriod sets how often the Client's buffer is flushed. +// If period is 0, the goroutine that periodically flushes the buffer is not +// launched and the buffer is only flushed when it is full.
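+// (Any non-positive period has the same effect: New only starts the flush +// goroutine when the period is strictly positive.)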
+// +// By default the flush period is 100 milliseconds. +func WithFlushPeriod(period time.Duration) Option { + return Option(func(c *Client) { + c.flushPeriod = period + }) +} + +// WithMaxPacketSize sets the maximum packet size in bytes sent by the Client. +// +// By default it is 1440. +func WithMaxPacketSize(n int) Option { + return Option(func(c *Client) { + c.maxPacketSize = n + }) +} + +// WithNetwork sets the network (udp, tcp, etc.) used by the client. +// See net.Dial documentation: https://golang.org/pkg/net/#Dial +// +// By default, network is udp. +func WithNetwork(network string) Option { + return Option(func(c *Client) { + c.network = network + }) +} + +// WithPrefix sets the prefix prepended to every bucket name. +func WithPrefix(prefix string) Option { + return Option(func(c *Client) { + c.prefix = prefix + }) +} + +// WithDatadogTags sets the Datadog tags sent with every metric. +// +// The tags should have the key:value syntax. +// See http://docs.datadoghq.com/guides/metrics/#tags +func WithDatadogTags(tags ...string) Option { + return Option(func(c *Client) { + // Datadog tag format: |#tag1:value1,tag2,tag3:value3 + // See http://docs.datadoghq.com/guides/dogstatsd/#datagram-format + buf := bytes.NewBufferString("|#") + first := true + for i := 0; i < len(tags); i++ { + if first { + first = false + } else { + buf.WriteByte(',') + } + buf.WriteString(tags[i]) + } + c.tagFormat = datadogFormat + c.tags = buf.String() + }) +} + +// WithInfluxDBTags sets the InfluxDB tags sent with every metric. +// +// The tags must be set as key-value pairs. If the number of tags is not even, +// WithInfluxDBTags panics. +// +// See https://influxdb.com/blog/2015/11/03/getting_started_with_influx_statsd.html +func WithInfluxDBTags(tags ...string) Option { + if len(tags)%2 != 0 { + panic("statsd: WithInfluxDBTags only accepts an even number of arguments") + } + + // InfluxDB tag format: ,tag1=payroll,region=us-west + // https://influxdb.com/blog/2015/11/03/getting_started_with_influx_statsd.html + return Option(func(c *Client) { + var buf bytes.Buffer + for i := 0; i < len(tags)/2; i++ { + buf.WriteByte(',') + buf.WriteString(tags[2*i]) + buf.WriteByte('=') + buf.WriteString(tags[2*i+1]) + } + c.tagFormat = influxDBFormat + c.tags = buf.String() + }) +} + +type tagFormat uint8 + +const ( + datadogFormat tagFormat = iota + 1 + influxDBFormat +) + +// New returns a new Client with the given options. It returns an error only if +// there is a network error. In this case, it returns a muted client along with +// the error. +func New(addr string, options ...Option) (*Client, error) { + c := &Client{ + flushPeriod: 100 * time.Millisecond, + // Worst-case scenario: + // Ethernet MTU - IPv6 Header - TCP Header = 1500 - 40 - 20 = 1440 + maxPacketSize: 1440, + } + + for _, o := range options { + o(c) + } + + if c.muted { + return c, nil + } + + if c.network == "" { + c.network = "udp" + } + var err error + c.conn, err = dialTimeout(c.network, addr, 5*time.Second) + if err != nil { + return &Client{muted: true}, err + } + // When using UDP, do a quick check to see if something is listening on the + // given port to return an error as soon as possible. + if c.network[:3] == "udp" { + for i := 0; i < 2; i++ { + _, err = c.conn.Write(nil) + if err != nil { + _ = c.conn.Close() + return &Client{muted: true}, err + } + } + } + + // To prevent a buffer overflow, add some capacity to the buffer to allow for + // an additional metric.
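+ // The 200 bytes of headroom is meant to cover the single metric that can be + // appended past maxPacketSize before flushIfBufferFull writes the packet; a + // longer metric simply forces the slice to grow.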
+ c.buf = make([]byte, 0, c.maxPacketSize+200) + + if c.flushPeriod > 0 { + go func() { + ticker := time.NewTicker(c.flushPeriod) + for _ = range ticker.C { + c.mu.Lock() + if c.closed { + ticker.Stop() + c.mu.Unlock() + return + } + c.flush(0) + c.mu.Unlock() + } + }() + } + + return c, nil +} + +// Count adds n to bucket with the given sampling rate. +func (c *Client) Count(bucket string, n int, rate float32) { + if c.muted { + return + } + if isRandAbove(rate) { + return + } + + c.mu.Lock() + l := len(c.buf) + c.appendBucket(bucket) + c.appendInt(n) + c.appendType("c") + c.appendRate(rate) + c.closeMetric() + c.flushIfBufferFull(l) + c.mu.Unlock() +} + +func isRandAbove(rate float32) bool { + return rate != 1 && randFloat() > rate +} + +// Increment increments the given bucket. +// It is equivalent to Count(bucket, 1, 1). +func (c *Client) Increment(bucket string) { + c.Count(bucket, 1, 1) +} + +// Gauge records an absolute value for the given bucket. +func (c *Client) Gauge(bucket string, value int) { + if c.muted { + return + } + + c.mu.Lock() + l := len(c.buf) + // To set a gauge to a negative value, we must first set it to 0. + // https://github.com/etsy/statsd/blob/master/docs/metric_types.md#gauges + if value < 0 { + c.appendBucket(bucket) + c.gauge(0) + } + c.appendBucket(bucket) + c.gauge(value) + c.flushIfBufferFull(l) + c.mu.Unlock() +} + +// ChangeGauge changes the value of a gauge by the given delta. +func (c *Client) ChangeGauge(bucket string, delta int) { + if c.muted { + return + } + if delta == 0 { + return + } + + c.mu.Lock() + l := len(c.buf) + c.appendBucket(bucket) + if delta > 0 { + c.appendByte('+') + } + c.gauge(delta) + c.flushIfBufferFull(l) + c.mu.Unlock() +} + +func (c *Client) gauge(value int) { + c.appendInt(value) + c.appendType("g") + c.closeMetric() +} + +// Timing sends a timing value to a bucket with the given sampling rate. +func (c *Client) Timing(bucket string, value int, rate float32) { + if c.muted { + return + } + if isRandAbove(rate) { + return + } + + c.mu.Lock() + l := len(c.buf) + c.appendBucket(bucket) + c.appendInt(value) + c.appendType("ms") + c.appendRate(rate) + c.closeMetric() + c.flushIfBufferFull(l) + c.mu.Unlock() +} + +// A Timing is a helper object that eases sending timing values. +type Timing struct { + start time.Time + c *Client +} + +// NewTiming creates a new Timing. +func (c *Client) NewTiming() Timing { + return Timing{start: now(), c: c} +} + +// Send sends the time elapsed since the creation of the Timing to a bucket +// with the given sampling rate. +func (t Timing) Send(bucket string, rate float32) { + t.c.Timing(bucket, int(t.Duration()/time.Millisecond), rate) +} + +// Duration gets the duration since the creation of the Timing. +func (t Timing) Duration() time.Duration { + return now().Sub(t.start) +} + +// Unique sends the given value to a set bucket. +func (c *Client) Unique(bucket string, value string) { + if c.muted { + return + } + + c.mu.Lock() + l := len(c.buf) + c.appendBucket(bucket) + c.appendString(value) + c.appendType("s") + c.closeMetric() + c.flushIfBufferFull(l) + c.mu.Unlock() +} + +// Flush flushes the Client's buffer. +func (c *Client) Flush() { + if c.muted { + return + } + + c.mu.Lock() + c.flush(0) + c.mu.Unlock() +} + +// Close flushes the Client's buffer and releases the associated resources.
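+// Close also marks the client as closed, so the background flush goroutine, +// if one was started, stops on its next tick.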
+func (c *Client) Close() { + if c.muted { + return + } + + c.mu.Lock() + c.flush(0) + c.handleError(c.conn.Close()) + c.closed = true + c.mu.Unlock() +} + +func (c *Client) appendByte(b byte) { + c.buf = append(c.buf, b) +} + +func (c *Client) appendString(s string) { + c.buf = append(c.buf, s...) +} + +func (c *Client) appendInt(i int) { + c.buf = strconv.AppendInt(c.buf, int64(i), 10) +} + +func (c *Client) appendBucket(bucket string) { + if c.prefix != "" { + c.appendString(c.prefix) + } + c.appendString(bucket) + if c.tagFormat == influxDBFormat { + c.appendString(c.tags) + } + c.appendByte(':') +} + +func (c *Client) appendType(t string) { + c.appendByte('|') + c.appendString(t) +} + +func (c *Client) appendRate(rate float32) { + if rate == 1 { + return + } + if c.rateCache == nil { + c.rateCache = make(map[float32]string) + } + + c.appendString("|@") + if s, ok := c.rateCache[rate]; ok { + c.appendString(s) + } else { + s = strconv.FormatFloat(float64(rate), 'f', -1, 32) + c.rateCache[rate] = s + c.appendString(s) + } +} + +func (c *Client) closeMetric() { + if c.tagFormat == datadogFormat { + c.appendString(c.tags) + } + c.appendByte('\n') +} + +func (c *Client) flushIfBufferFull(lastSafeLen int) { + if len(c.buf) > c.maxPacketSize { + c.flush(lastSafeLen) + } +} + +// flush flushes the first n bytes of the buffer. +// If n is 0, the whole buffer is flushed. +func (c *Client) flush(n int) { + if len(c.buf) == 0 { + return + } + if n == 0 { + n = len(c.buf) + } + + // Trim the last \n, StatsD does not like it. + _, err := c.conn.Write(c.buf[:n-1]) + c.handleError(err) + if n < len(c.buf) { + copy(c.buf, c.buf[n:]) + } + c.buf = c.buf[:len(c.buf)-n] +} + +func (c *Client) handleError(err error) { + if err != nil && c.errorHandler != nil { + c.errorHandler(err) + } +} + +// Stubbed out for testing. 
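+// The tests swap these for deterministic fakes: a mock dialer, a fixed clock +// and a stubbed random source (see statsd_test.go).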
+var ( + dialTimeout = net.DialTimeout + now = time.Now + randFloat = rand.Float32 +) diff --git a/vendor/gopkg.in/alexcesaro/statsd.v1/statsd_test.go b/vendor/gopkg.in/alexcesaro/statsd.v1/statsd_test.go new file mode 100644 index 0000000000..873c53b2ad --- /dev/null +++ b/vendor/gopkg.in/alexcesaro/statsd.v1/statsd_test.go @@ -0,0 +1,466 @@ +package statsd + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "net" + "sync" + "testing" + "time" +) + +const ( + testAddr = ":0" + testKey = "test_key" +) + +var testDate = time.Date(2015, 10, 22, 16, 53, 0, 0, time.UTC) + +func TestCount(t *testing.T) { + testOutput(t, "test_key:5|c", func(c *Client) { + c.Count(testKey, 5, 1) + }) +} + +func TestIncrement(t *testing.T) { + testOutput(t, "test_key:1|c", func(c *Client) { + c.Increment(testKey) + }) +} + +func TestGauge(t *testing.T) { + testOutput(t, "test_key:5|g\ntest_key:0|g\ntest_key:-10|g", func(c *Client) { + c.Gauge(testKey, 5) + c.Gauge(testKey, -10) + }) +} + +func TestChangeGauge(t *testing.T) { + testOutput(t, "test_key:+17|g\ntest_key:-1|g", func(c *Client) { + c.ChangeGauge(testKey, 17) + c.ChangeGauge(testKey, -1) + c.ChangeGauge(testKey, 0) + }) +} + +func TestTiming(t *testing.T) { + testOutput(t, "test_key:6|ms", func(c *Client) { + c.Timing(testKey, 6, 1) + }) +} + +func TestNewTiming(t *testing.T) { + i := 0 + now = func() time.Time { + i++ + switch i { + default: + return testDate + case 2: + return testDate.Add(10 * time.Millisecond) + case 3: + return testDate.Add(100 * time.Millisecond) + case 4: + return testDate.Add(time.Second) + } + } + defer func() { now = time.Now }() + + testOutput(t, "test_key:10|ms\ntest_key:1000|ms", func(c *Client) { + timing := c.NewTiming() + timing.Send(testKey, 1) + + got := timing.Duration().Nanoseconds() + want := int64(100 * time.Millisecond) + if got != want { + t.Errorf("Duration() = %v, want %v", got, want) + } + + timing.Send(testKey, 1) + }) +} + +func TestUnique(t *testing.T) { + testOutput(t, "test_key:foo|s", func(c *Client) { + c.Unique(testKey, "foo") + }) +} + +func TestSamplingRate(t *testing.T) { + testOutput(t, "test_key:3|c|@0.6\ntest_key:4|ms|@0.6", func(c *Client) { + randFloat = func() float32 { return 0.5 } + c.Count(testKey, 1, 0.2) + c.Timing(testKey, 2, 0.3) + c.Count(testKey, 3, 0.6) + c.Timing(testKey, 4, 0.6) + }) +} + +func TestMute(t *testing.T) { + dialTimeout = func(string, string, time.Duration) (net.Conn, error) { + t.Fatal("net.Dial should not be called") + return nil, nil + } + defer func() { dialTimeout = net.DialTimeout }() + + c, err := New("", Mute(true)) + if err != nil { + t.Errorf("New() = %v", err) + } + c.Increment(testKey) + c.Gauge(testKey, 1) + c.ChangeGauge(testKey, 1) + c.Timing(testKey, 1, 1) + c.Unique(testKey, "1") + c.Flush() + c.Close() +} + +func TestPrefix(t *testing.T) { + testOutput(t, "foo.test_key:1|c", func(c *Client) { + c.Increment(testKey) + }, WithPrefix("foo.")) +} + +func TestDatadogTags(t *testing.T) { + testOutput(t, "test_key:1|c|#tag1:value1,tag2,tag3:value3", func(c *Client) { + c.Increment(testKey) + }, WithDatadogTags("tag1:value1", "tag2", "tag3:value3")) +} + +func TestInfluxDBTags(t *testing.T) { + testOutput(t, "test_key,key1=value1,key2=value2:1|c", func(c *Client) { + c.Increment(testKey) + }, WithInfluxDBTags("key1", "value1", "key2", "value2")) +} + +func TestInfluxDBTagsPanic(t *testing.T) { + defer func() { + r := recover() + if r == nil { + t.Fatal("WithInfluxDBTags should panic when only one argument is provided") + } + }() + New("", 
WithInfluxDBTags("key1")) +} + +func TestErrorHandler(t *testing.T) { + errorCount := 0 + testClient(t, func(c *Client) { + getMockConn(c).err = errors.New("test error") + + c.Increment(testKey) + c.Close() + if errorCount != 2 { + t.Errorf("Wrong error count, got %d, want 2", errorCount) + } + }, WithErrorHandler(func(err error) { + if err == nil { + t.Error("Error should not be nil") + } + errorCount++ + })) +} + +func TestFlush(t *testing.T) { + testClient(t, func(c *Client) { + c.Increment(testKey) + c.Flush() + got := getMockConn(c).buf.String() + want := "test_key:1|c" + if got != want { + t.Errorf("Invalid output, got %q, want %q", got, want) + } + c.Close() + }) +} + +func TestFlushPeriod(t *testing.T) { + testClient(t, func(c *Client) { + c.Increment(testKey) + time.Sleep(time.Millisecond) + c.mu.Lock() + got := getMockConn(c).buf.String() + want := "test_key:1|c" + if got != want { + t.Errorf("Invalid output, got %q, want %q", got, want) + } + c.mu.Unlock() + c.Close() + }, WithFlushPeriod(time.Nanosecond)) +} + +func TestMaxPacketSize(t *testing.T) { + testClient(t, func(c *Client) { + c.Increment(testKey) + conn := getMockConn(c) + got := conn.buf.String() + if got != "" { + t.Errorf("Output should be empty, got %q", got) + } + + c.Increment(testKey) + got = conn.buf.String() + want := "test_key:1|c" + if got != want { + t.Errorf("Invalid output, got %q, want %q", got, want) + } + conn.buf.Reset() + c.Close() + + got = conn.buf.String() + if got != want { + t.Errorf("Invalid output, got %q, want %q", got, want) + } + }, WithMaxPacketSize(15)) +} + +func TestDialError(t *testing.T) { + dialTimeout = func(string, string, time.Duration) (net.Conn, error) { + return nil, errors.New("") + } + defer func() { dialTimeout = net.DialTimeout }() + + c, err := New(testAddr) + if c == nil || !c.muted { + t.Error("New() did not return a muted client") + } + if err == nil { + t.Error("New() did not return an error") + } +} + +func TestConcurrency(t *testing.T) { + testOutput(t, "test_key:1|c\ntest_key:1|c\ntest_key:1|c", func(c *Client) { + var wg sync.WaitGroup + wg.Add(1) + c.Increment(testKey) + go func() { + c.Increment(testKey) + wg.Done() + }() + c.Increment(testKey) + wg.Wait() + }) +} + +func TestUDPNotListening(t *testing.T) { + dialTimeout = mockUDPClosed + defer func() { dialTimeout = net.DialTimeout }() + + c, err := New(testAddr) + if c == nil || !c.muted { + t.Error("New() did not return a muted client") + } + if err == nil { + t.Error("New should return an error") + } +} + +type mockClosedUDPConn struct { + i int + net.Conn +} + +func (c *mockClosedUDPConn) Write(p []byte) (int, error) { + c.i++ + if c.i == 2 { + return 0, errors.New("test error") + } + return 0, nil +} + +func (c *mockClosedUDPConn) Close() error { + return nil +} + +func mockUDPClosed(string, string, time.Duration) (net.Conn, error) { + return &mockClosedUDPConn{}, nil +} + +func testClient(t *testing.T, f func(*Client), options ...Option) { + dialTimeout = mockDial + defer func() { dialTimeout = net.DialTimeout }() + + options = append([]Option{ + WithFlushPeriod(0), + WithErrorHandler(expectNoError(t)), + }, options...) + c, err := New(testAddr, options...) + if err != nil { + t.Fatalf("New: %v", err) + } + + f(c) +} + +func testOutput(t *testing.T, want string, f func(*Client), options ...Option) { + testClient(t, func(c *Client) { + f(c) + c.Close() + + got := c.conn.(*mockConn).buf.String() + if got != want { + t.Errorf("Invalid output, got %q, want %q", got, want) + } + }, options...) 
+} + +func expectNoError(t *testing.T) func(error) { + return func(err error) { + t.Errorf("ErrorHandler should not receive an error: %v", err) + } +} + +type mockConn struct { + buf bytes.Buffer + err error + net.Conn +} + +func (c *mockConn) Write(p []byte) (int, error) { + if c.err != nil { + return 0, c.err + } + return c.buf.Write(p) +} + +func (c *mockConn) Close() error { + return c.err +} + +func getMockConn(c *Client) *mockConn { + if mock, ok := c.conn.(*mockConn); ok { + return mock + } + return nil +} + +func mockDial(string, string, time.Duration) (net.Conn, error) { + return &mockConn{}, nil +} + +func TestUDP(t *testing.T) { + testNetwork(t, "udp") +} + +func TestTCP(t *testing.T) { + testNetwork(t, "tcp") +} + +func testNetwork(t *testing.T, network string) { + received := make(chan bool) + server := newServer(t, network, testAddr, func(p []byte) { + s := string(p) + if s != "test_key:1|c" { + t.Errorf("invalid output: %q", s) + } + received <- true + }) + defer server.Close() + + c, err := New(server.addr, + WithNetwork(network), WithErrorHandler(expectNoError(t))) + if err != nil { + t.Fatalf("New: %v", err) + } + + c.Increment(testKey) + c.Close() + select { + case <-time.After(100 * time.Millisecond): + t.Error("server received nothing after 100ms") + case <-received: + } +} + +type server struct { + t testing.TB + addr string + closer io.Closer + closed chan bool +} + +func newServer(t testing.TB, network, addr string, f func([]byte)) *server { + s := &server{t: t, closed: make(chan bool)} + switch network { + case "udp": + laddr, err := net.ResolveUDPAddr("udp", addr) + if err != nil { + t.Fatal(err) + } + conn, err := net.ListenUDP("udp", laddr) + if err != nil { + t.Fatal(err) + } + s.closer = conn + s.addr = conn.LocalAddr().String() + go func() { + buf := make([]byte, 1024) + for { + n, err := conn.Read(buf) + if err != nil { + s.closed <- true + return + } + if n > 0 { + f(buf[:n]) + } + } + }() + case "tcp": + ln, err := net.Listen("tcp", addr) + if err != nil { + t.Fatal(err) + } + s.closer = ln + s.addr = ln.Addr().String() + go func() { + for { + conn, err := ln.Accept() + if err != nil { + s.closed <- true + return + } + p, err := ioutil.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + if err := conn.Close(); err != nil { + t.Fatal(err) + } + f(p) + } + }() + default: + t.Fatalf("Invalid network: %q", network) + } + + return s +} + +func (s *server) Close() { + if err := s.closer.Close(); err != nil { + s.t.Error(err) + } + <-s.closed +} + +func Benchmark(b *testing.B) { + s := newServer(b, "udp", testAddr, func([]byte) {}) + c, err := New(s.addr, WithFlushPeriod(0)) + if err != nil { + b.Fatal(err) + } + for i := 0; i < b.N; i++ { + c.Increment(testKey) + c.Count(testKey, 17, .5) + c.Gauge(testKey, 17) + c.Timing(testKey, 17, 1) + c.NewTiming().Send(testKey, 1) + } + c.Close() + s.Close() +} diff --git a/vendor/gopkg.in/inf.v0/benchmark_test.go b/vendor/gopkg.in/inf.v0/benchmark_test.go new file mode 100644 index 0000000000..27071da0e8 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/benchmark_test.go @@ -0,0 +1,210 @@ +package inf + +import ( + "fmt" + "math/big" + "math/rand" + "sync" + "testing" +) + +const maxcap = 1024 * 1024 +const bits = 256 +const maxscale = 32 + +var once sync.Once + +var decInput [][2]Dec +var intInput [][2]big.Int + +var initBench = func() { + decInput = make([][2]Dec, maxcap) + intInput = make([][2]big.Int, maxcap) + max := new(big.Int).Lsh(big.NewInt(1), bits) + r := rand.New(rand.NewSource(0)) + for i := 0; i < cap(decInput); 
i++ { + decInput[i][0].SetUnscaledBig(new(big.Int).Rand(r, max)). + SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale))) + decInput[i][1].SetUnscaledBig(new(big.Int).Rand(r, max)). + SetScale(Scale(r.Int31n(int32(2*maxscale-1)) - int32(maxscale))) + } + for i := 0; i < cap(intInput); i++ { + intInput[i][0].Rand(r, max) + intInput[i][1].Rand(r, max) + } +} + +func doBenchmarkDec1(b *testing.B, f func(z *Dec)) { + once.Do(initBench) + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + f(&decInput[i%maxcap][0]) + } +} + +func doBenchmarkDec2(b *testing.B, f func(x, y *Dec)) { + once.Do(initBench) + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + f(&decInput[i%maxcap][0], &decInput[i%maxcap][1]) + } +} + +func doBenchmarkInt1(b *testing.B, f func(z *big.Int)) { + once.Do(initBench) + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + f(&intInput[i%maxcap][0]) + } +} + +func doBenchmarkInt2(b *testing.B, f func(x, y *big.Int)) { + once.Do(initBench) + b.ResetTimer() + b.StartTimer() + for i := 0; i < b.N; i++ { + f(&intInput[i%maxcap][0], &intInput[i%maxcap][1]) + } +} + +func Benchmark_Dec_String(b *testing.B) { + doBenchmarkDec1(b, func(x *Dec) { + x.String() + }) +} + +func Benchmark_Dec_StringScan(b *testing.B) { + doBenchmarkDec1(b, func(x *Dec) { + s := x.String() + d := new(Dec) + fmt.Sscan(s, d) + }) +} + +func Benchmark_Dec_GobEncode(b *testing.B) { + doBenchmarkDec1(b, func(x *Dec) { + x.GobEncode() + }) +} + +func Benchmark_Dec_GobEnDecode(b *testing.B) { + doBenchmarkDec1(b, func(x *Dec) { + g, _ := x.GobEncode() + new(Dec).GobDecode(g) + }) +} + +func Benchmark_Dec_Add(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + ys := y.Scale() + y.SetScale(x.Scale()) + _ = new(Dec).Add(x, y) + y.SetScale(ys) + }) +} + +func Benchmark_Dec_AddMixed(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + _ = new(Dec).Add(x, y) + }) +} + +func Benchmark_Dec_Sub(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + ys := y.Scale() + y.SetScale(x.Scale()) + _ = new(Dec).Sub(x, y) + y.SetScale(ys) + }) +} + +func Benchmark_Dec_SubMixed(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + _ = new(Dec).Sub(x, y) + }) +} + +func Benchmark_Dec_Mul(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + _ = new(Dec).Mul(x, y) + }) +} + +func Benchmark_Dec_Mul_QuoExact(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + v := new(Dec).Mul(x, y) + _ = new(Dec).QuoExact(v, y) + }) +} + +func Benchmark_Dec_QuoRound_Fixed_Down(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + _ = new(Dec).QuoRound(x, y, 0, RoundDown) + }) +} + +func Benchmark_Dec_QuoRound_Fixed_HalfUp(b *testing.B) { + doBenchmarkDec2(b, func(x, y *Dec) { + _ = new(Dec).QuoRound(x, y, 0, RoundHalfUp) + }) +} + +func Benchmark_Int_String(b *testing.B) { + doBenchmarkInt1(b, func(x *big.Int) { + x.String() + }) +} + +func Benchmark_Int_StringScan(b *testing.B) { + doBenchmarkInt1(b, func(x *big.Int) { + s := x.String() + d := new(big.Int) + fmt.Sscan(s, d) + }) +} + +func Benchmark_Int_GobEncode(b *testing.B) { + doBenchmarkInt1(b, func(x *big.Int) { + x.GobEncode() + }) +} + +func Benchmark_Int_GobEnDecode(b *testing.B) { + doBenchmarkInt1(b, func(x *big.Int) { + g, _ := x.GobEncode() + new(big.Int).GobDecode(g) + }) +} + +func Benchmark_Int_Add(b *testing.B) { + doBenchmarkInt2(b, func(x, y *big.Int) { + _ = new(big.Int).Add(x, y) + }) +} + +func Benchmark_Int_Sub(b *testing.B) { + doBenchmarkInt2(b, func(x, y *big.Int) { + _ = new(big.Int).Sub(x, y) + }) +} + 
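+// The big.Int benchmarks mirror the Dec benchmarks above and give a baseline +// for the overhead that scale handling adds to Dec operations.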
+func Benchmark_Int_Mul(b *testing.B) { + doBenchmarkInt2(b, func(x, y *big.Int) { + _ = new(big.Int).Mul(x, y) + }) +} + +func Benchmark_Int_Quo(b *testing.B) { + doBenchmarkInt2(b, func(x, y *big.Int) { + _ = new(big.Int).Quo(x, y) + }) +} + +func Benchmark_Int_QuoRem(b *testing.B) { + doBenchmarkInt2(b, func(x, y *big.Int) { + _, _ = new(big.Int).QuoRem(x, y, new(big.Int)) + }) +} diff --git a/vendor/gopkg.in/inf.v0/dec_go1_2_test.go b/vendor/gopkg.in/inf.v0/dec_go1_2_test.go new file mode 100644 index 0000000000..5df0f7b553 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/dec_go1_2_test.go @@ -0,0 +1,33 @@ +// +build go1.2 + +package inf + +import ( + "encoding" + "encoding/json" + "testing" +) + +var _ encoding.TextMarshaler = new(Dec) +var _ encoding.TextUnmarshaler = new(Dec) + +type Obj struct { + Val *Dec +} + +func TestDecJsonMarshalUnmarshal(t *testing.T) { + o := Obj{Val: NewDec(123, 2)} + js, err := json.Marshal(o) + if err != nil { + t.Fatalf("json.Marshal(%v): got %v, want ok", o, err) + } + o2 := &Obj{} + err = json.Unmarshal(js, o2) + if err != nil { + t.Fatalf("json.Unmarshal(%#q): got %v, want ok", js, err) + } + if o.Val.Scale() != o2.Val.Scale() || + o.Val.UnscaledBig().Cmp(o2.Val.UnscaledBig()) != 0 { + t.Fatalf("json.Unmarshal(json.Marshal(%v)): want %v, got %v", o, o, o2) + } +} diff --git a/vendor/gopkg.in/inf.v0/dec_internal_test.go b/vendor/gopkg.in/inf.v0/dec_internal_test.go new file mode 100644 index 0000000000..d4fbe3e5bc --- /dev/null +++ b/vendor/gopkg.in/inf.v0/dec_internal_test.go @@ -0,0 +1,40 @@ +package inf + +import ( + "math/big" + "testing" +) + +var decQuoRemZZZ = []struct { + z, x, y *Dec + r *big.Rat + srA, srB int +}{ + // basic examples + {NewDec(1, 0), NewDec(2, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1}, + {NewDec(15, 1), NewDec(3, 0), NewDec(2, 0), big.NewRat(0, 1), 0, 1}, + {NewDec(1, 1), NewDec(1, 0), NewDec(10, 0), big.NewRat(0, 1), 0, 1}, + {NewDec(0, 0), NewDec(2, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1}, + {NewDec(0, 0), NewDec(2, 0), NewDec(6, 0), big.NewRat(1, 3), 1, 1}, + {NewDec(1, 1), NewDec(2, 0), NewDec(12, 0), big.NewRat(2, 3), 1, 1}, + + // examples from the Go Language Specification + {NewDec(1, 0), NewDec(5, 0), NewDec(3, 0), big.NewRat(2, 3), 1, 1}, + {NewDec(-1, 0), NewDec(-5, 0), NewDec(3, 0), big.NewRat(-2, 3), -1, 1}, + {NewDec(-1, 0), NewDec(5, 0), NewDec(-3, 0), big.NewRat(-2, 3), 1, -1}, + {NewDec(1, 0), NewDec(-5, 0), NewDec(-3, 0), big.NewRat(2, 3), -1, -1}, +} + +func TestDecQuoRem(t *testing.T) { + for i, a := range decQuoRemZZZ { + z, rA, rB := new(Dec), new(big.Int), new(big.Int) + s := scaleQuoExact{}.Scale(a.x, a.y) + z.quoRem(a.x, a.y, s, true, rA, rB) + if a.z.Cmp(z) != 0 || a.r.Cmp(new(big.Rat).SetFrac(rA, rB)) != 0 { + t.Errorf("#%d QuoRemZZZ got %v, %v, %v; expected %v, %v", i, z, rA, rB, a.z, a.r) + } + if a.srA != rA.Sign() || a.srB != rB.Sign() { + t.Errorf("#%d QuoRemZZZ wrong signs, got %v, %v; expected %v, %v", i, rA.Sign(), rB.Sign(), a.srA, a.srB) + } + } +} diff --git a/vendor/gopkg.in/inf.v0/dec_test.go b/vendor/gopkg.in/inf.v0/dec_test.go new file mode 100644 index 0000000000..e4b09b3fdc --- /dev/null +++ b/vendor/gopkg.in/inf.v0/dec_test.go @@ -0,0 +1,379 @@ +package inf_test + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" + "strings" + "testing" + + "gopkg.in/inf.v0" +) + +type decFunZZ func(z, x, y *inf.Dec) *inf.Dec +type decArgZZ struct { + z, x, y *inf.Dec +} + +var decSumZZ = []decArgZZ{ + {inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)}, + {inf.NewDec(1, 0), inf.NewDec(1, 
0), inf.NewDec(0, 0)}, + {inf.NewDec(1111111110, 0), inf.NewDec(123456789, 0), inf.NewDec(987654321, 0)}, + {inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(0, 0)}, + {inf.NewDec(864197532, 0), inf.NewDec(-123456789, 0), inf.NewDec(987654321, 0)}, + {inf.NewDec(-1111111110, 0), inf.NewDec(-123456789, 0), inf.NewDec(-987654321, 0)}, + {inf.NewDec(12, 2), inf.NewDec(1, 1), inf.NewDec(2, 2)}, +} + +var decProdZZ = []decArgZZ{ + {inf.NewDec(0, 0), inf.NewDec(0, 0), inf.NewDec(0, 0)}, + {inf.NewDec(0, 0), inf.NewDec(1, 0), inf.NewDec(0, 0)}, + {inf.NewDec(1, 0), inf.NewDec(1, 0), inf.NewDec(1, 0)}, + {inf.NewDec(-991*991, 0), inf.NewDec(991, 0), inf.NewDec(-991, 0)}, + {inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)}, + {inf.NewDec(2, -3), inf.NewDec(1, -1), inf.NewDec(2, -2)}, + {inf.NewDec(2, 3), inf.NewDec(1, 1), inf.NewDec(2, 2)}, +} + +func TestDecSignZ(t *testing.T) { + var zero inf.Dec + for _, a := range decSumZZ { + s := a.z.Sign() + e := a.z.Cmp(&zero) + if s != e { + t.Errorf("got %d; want %d for z = %v", s, e, a.z) + } + } +} + +func TestDecAbsZ(t *testing.T) { + var zero inf.Dec + for _, a := range decSumZZ { + var z inf.Dec + z.Abs(a.z) + var e inf.Dec + e.Set(a.z) + if e.Cmp(&zero) < 0 { + e.Sub(&zero, &e) + } + if z.Cmp(&e) != 0 { + t.Errorf("got z = %v; want %v", z, e) + } + } +} + +func testDecFunZZ(t *testing.T, msg string, f decFunZZ, a decArgZZ) { + var z inf.Dec + f(&z, a.x, a.y) + if (&z).Cmp(a.z) != 0 { + t.Errorf("%s%+v\n\tgot z = %v; want %v", msg, a, &z, a.z) + } +} + +func TestDecSumZZ(t *testing.T) { + AddZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Add(x, y) } + SubZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Sub(x, y) } + for _, a := range decSumZZ { + arg := a + testDecFunZZ(t, "AddZZ", AddZZ, arg) + + arg = decArgZZ{a.z, a.y, a.x} + testDecFunZZ(t, "AddZZ symmetric", AddZZ, arg) + + arg = decArgZZ{a.x, a.z, a.y} + testDecFunZZ(t, "SubZZ", SubZZ, arg) + + arg = decArgZZ{a.y, a.z, a.x} + testDecFunZZ(t, "SubZZ symmetric", SubZZ, arg) + } +} + +func TestDecProdZZ(t *testing.T) { + MulZZ := func(z, x, y *inf.Dec) *inf.Dec { return z.Mul(x, y) } + for _, a := range decProdZZ { + arg := a + testDecFunZZ(t, "MulZZ", MulZZ, arg) + + arg = decArgZZ{a.z, a.y, a.x} + testDecFunZZ(t, "MulZZ symmetric", MulZZ, arg) + } +} + +var decUnscaledTests = []struct { + d *inf.Dec + u int64 // ignored when ok == false + ok bool +}{ + {new(inf.Dec), 0, true}, + {inf.NewDec(-1<<63, 0), -1 << 63, true}, + {inf.NewDec(-(-1<<63 + 1), 0), -(-1<<63 + 1), true}, + {new(inf.Dec).Neg(inf.NewDec(-1<<63, 0)), 0, false}, + {new(inf.Dec).Sub(inf.NewDec(-1<<63, 0), inf.NewDec(1, 0)), 0, false}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), 0, false}, +} + +func TestDecUnscaled(t *testing.T) { + for i, tt := range decUnscaledTests { + u, ok := tt.d.Unscaled() + if ok != tt.ok { + t.Errorf("#%d Unscaled: got %v, expected %v", i, ok, tt.ok) + } else if ok && u != tt.u { + t.Errorf("#%d Unscaled: got %v, expected %v", i, u, tt.u) + } + } +} + +var decRoundTests = [...]struct { + in *inf.Dec + s inf.Scale + r inf.Rounder + exp *inf.Dec +}{ + {inf.NewDec(123424999999999993, 15), 2, inf.RoundHalfUp, inf.NewDec(12342, 2)}, + {inf.NewDec(123425000000000001, 15), 2, inf.RoundHalfUp, inf.NewDec(12343, 2)}, + {inf.NewDec(123424999999999993, 15), 15, inf.RoundHalfUp, inf.NewDec(123424999999999993, 15)}, + {inf.NewDec(123424999999999993, 15), 16, inf.RoundHalfUp, inf.NewDec(1234249999999999930, 16)}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -1, inf.RoundHalfUp, 
inf.NewDec(1844674407370955162, -1)}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -2, inf.RoundHalfUp, inf.NewDec(184467440737095516, -2)}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -3, inf.RoundHalfUp, inf.NewDec(18446744073709552, -3)}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -4, inf.RoundHalfUp, inf.NewDec(1844674407370955, -4)}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -5, inf.RoundHalfUp, inf.NewDec(184467440737096, -5)}, + {inf.NewDecBig(new(big.Int).Lsh(big.NewInt(1), 64), 0), -6, inf.RoundHalfUp, inf.NewDec(18446744073710, -6)}, +} + +func TestDecRound(t *testing.T) { + for i, tt := range decRoundTests { + z := new(inf.Dec).Round(tt.in, tt.s, tt.r) + if tt.exp.Cmp(z) != 0 { + t.Errorf("#%d Round got %v; expected %v", i, z, tt.exp) + } + } +} + +var decStringTests = []struct { + in string + out string + val int64 + scale inf.Scale // skip SetString if negative + ok bool + scanOk bool +}{ + {in: "", ok: false, scanOk: false}, + {in: "a", ok: false, scanOk: false}, + {in: "z", ok: false, scanOk: false}, + {in: "+", ok: false, scanOk: false}, + {in: "-", ok: false, scanOk: false}, + {in: "g", ok: false, scanOk: false}, + {in: ".", ok: false, scanOk: false}, + {in: ".-0", ok: false, scanOk: false}, + {in: ".+0", ok: false, scanOk: false}, + // Scannable but not SetStringable + {"0b", "ignored", 0, 0, false, true}, + {"0x", "ignored", 0, 0, false, true}, + {"0xg", "ignored", 0, 0, false, true}, + {"0.0g", "ignored", 0, 1, false, true}, + // examples from godoc for Dec + {"0", "0", 0, 0, true, true}, + {"0.00", "0.00", 0, 2, true, true}, + {"ignored", "0", 0, -2, true, false}, + {"1", "1", 1, 0, true, true}, + {"1.00", "1.00", 100, 2, true, true}, + {"10", "10", 10, 0, true, true}, + {"ignored", "10", 1, -1, true, false}, + // other tests + {"+0", "0", 0, 0, true, true}, + {"-0", "0", 0, 0, true, true}, + {"0.0", "0.0", 0, 1, true, true}, + {"0.1", "0.1", 1, 1, true, true}, + {"0.", "0", 0, 0, true, true}, + {"-10", "-10", -1, -1, true, true}, + {"-1", "-1", -1, 0, true, true}, + {"-0.1", "-0.1", -1, 1, true, true}, + {"-0.01", "-0.01", -1, 2, true, true}, + {"+0.", "0", 0, 0, true, true}, + {"-0.", "0", 0, 0, true, true}, + {".0", "0.0", 0, 1, true, true}, + {"+.0", "0.0", 0, 1, true, true}, + {"-.0", "0.0", 0, 1, true, true}, + {"0.0000000000", "0.0000000000", 0, 10, true, true}, + {"0.0000000001", "0.0000000001", 1, 10, true, true}, + {"-0.0000000000", "0.0000000000", 0, 10, true, true}, + {"-0.0000000001", "-0.0000000001", -1, 10, true, true}, + {"-10", "-10", -10, 0, true, true}, + {"+10", "10", 10, 0, true, true}, + {"00", "0", 0, 0, true, true}, + {"023", "23", 23, 0, true, true}, // decimal, not octal + {"-02.3", "-2.3", -23, 1, true, true}, // decimal, not octal +} + +func TestDecGetString(t *testing.T) { + z := new(inf.Dec) + for i, test := range decStringTests { + if !test.ok { + continue + } + z.SetUnscaled(test.val) + z.SetScale(test.scale) + + s := z.String() + if s != test.out { + t.Errorf("#%da got %s; want %s", i, s, test.out) + } + + s = fmt.Sprintf("%d", z) + if s != test.out { + t.Errorf("#%db got %s; want %s", i, s, test.out) + } + } +} + +func TestDecSetString(t *testing.T) { + tmp := new(inf.Dec) + for i, test := range decStringTests { + if test.scale < 0 { + // SetString only supports scale >= 0 + continue + } + // initialize to a non-zero value so that issues with parsing + // 0 are detected + tmp.Set(inf.NewDec(1234567890, 123)) + n1, ok1 := new(inf.Dec).SetString(test.in) + n2, ok2 := 
tmp.SetString(test.in) + expected := inf.NewDec(test.val, test.scale) + if ok1 != test.ok || ok2 != test.ok { + t.Errorf("#%d (input '%s') ok incorrect (should be %t)", i, test.in, test.ok) + continue + } + if !ok1 { + if n1 != nil { + t.Errorf("#%d (input '%s') n1 != nil", i, test.in) + } + continue + } + if !ok2 { + if n2 != nil { + t.Errorf("#%d (input '%s') n2 != nil", i, test.in) + } + continue + } + + if n1.Cmp(expected) != 0 { + t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val) + } + if n2.Cmp(expected) != 0 { + t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val) + } + } +} + +func TestDecScan(t *testing.T) { + tmp := new(inf.Dec) + for i, test := range decStringTests { + if test.scale < 0 { + // SetString only supports scale >= 0 + continue + } + // initialize to a non-zero value so that issues with parsing + // 0 are detected + tmp.Set(inf.NewDec(1234567890, 123)) + n1, n2 := new(inf.Dec), tmp + nn1, err1 := fmt.Sscan(test.in, n1) + nn2, err2 := fmt.Sscan(test.in, n2) + if !test.scanOk { + if err1 == nil || err2 == nil { + t.Errorf("#%d (input '%s') ok incorrect, should be %t", i, test.in, test.scanOk) + } + continue + } + expected := inf.NewDec(test.val, test.scale) + if nn1 != 1 || err1 != nil || nn2 != 1 || err2 != nil { + t.Errorf("#%d (input '%s') error %d %v, %d %v", i, test.in, nn1, err1, nn2, err2) + continue + } + if n1.Cmp(expected) != 0 { + t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n1, test.val) + } + if n2.Cmp(expected) != 0 { + t.Errorf("#%d (input '%s') got: %s want: %d", i, test.in, n2, test.val) + } + } +} + +var decScanNextTests = []struct { + in string + ok bool + next rune +}{ + {"", false, 0}, + {"a", false, 'a'}, + {"z", false, 'z'}, + {"+", false, 0}, + {"-", false, 0}, + {"g", false, 'g'}, + {".", false, 0}, + {".-0", false, '-'}, + {".+0", false, '+'}, + {"0b", true, 'b'}, + {"0x", true, 'x'}, + {"0xg", true, 'x'}, + {"0.0g", true, 'g'}, +} + +func TestDecScanNext(t *testing.T) { + for i, test := range decScanNextTests { + rdr := strings.NewReader(test.in) + n1 := new(inf.Dec) + nn1, _ := fmt.Fscan(rdr, n1) + if (test.ok && nn1 == 0) || (!test.ok && nn1 > 0) { + t.Errorf("#%d (input '%s') ok incorrect should be %t", i, test.in, test.ok) + continue + } + r := rune(0) + nn2, err := fmt.Fscanf(rdr, "%c", &r) + if test.next != r { + t.Errorf("#%d (input '%s') next incorrect, got %c should be %c, %d, %v", i, test.in, r, test.next, nn2, err) + } + } +} + +var decGobEncodingTests = []string{ + "0", + "1", + "2", + "10", + "42", + "1234567890", + "298472983472983471903246121093472394872319615612417471234712061", +} + +func TestDecGobEncoding(t *testing.T) { + var medium bytes.Buffer + enc := gob.NewEncoder(&medium) + dec := gob.NewDecoder(&medium) + for i, test := range decGobEncodingTests { + for j := 0; j < 2; j++ { + for k := inf.Scale(-5); k <= 5; k++ { + medium.Reset() // empty buffer for each test case (in case of failures) + stest := test + if j != 0 { + // negative numbers + stest = "-" + test + } + var tx inf.Dec + tx.SetString(stest) + tx.SetScale(k) // test with positive, negative, and zero scale + if err := enc.Encode(&tx); err != nil { + t.Errorf("#%d%c: encoding failed: %s", i, 'a'+j, err) + } + var rx inf.Dec + if err := dec.Decode(&rx); err != nil { + t.Errorf("#%d%c: decoding failed: %s", i, 'a'+j, err) + } + if rx.Cmp(&tx) != 0 { + t.Errorf("#%d%c: transmission failed: got %s want %s", i, 'a'+j, &rx, &tx) + } + } + } + } +} diff --git a/vendor/gopkg.in/inf.v0/example_test.go 
b/vendor/gopkg.in/inf.v0/example_test.go new file mode 100644 index 0000000000..fa1e54d16e --- /dev/null +++ b/vendor/gopkg.in/inf.v0/example_test.go @@ -0,0 +1,62 @@ +package inf_test + +import ( + "fmt" + "log" +) + +import "gopkg.in/inf.v0" + +func ExampleDec_SetString() { + d := new(inf.Dec) + d.SetString("012345.67890") // decimal; leading 0 ignored; trailing 0 kept + fmt.Println(d) + // Output: 12345.67890 +} + +func ExampleDec_Scan() { + // The Scan function is rarely used directly; + // the fmt package recognizes it as an implementation of fmt.Scanner. + d := new(inf.Dec) + _, err := fmt.Sscan("184467440.73709551617", d) + if err != nil { + log.Println("error scanning value:", err) + } else { + fmt.Println(d) + } + // Output: 184467440.73709551617 +} + +func ExampleDec_QuoRound_scale2RoundDown() { + // 10 / 3 is an infinite decimal; it has no exact Dec representation + x, y := inf.NewDec(10, 0), inf.NewDec(3, 0) + // use 2 digits beyond the decimal point, round towards 0 + z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundDown) + fmt.Println(z) + // Output: 3.33 +} + +func ExampleDec_QuoRound_scale2RoundCeil() { + // -42 / 400 is a finite decimal with 3 digits beyond the decimal point + x, y := inf.NewDec(-42, 0), inf.NewDec(400, 0) + // use 2 digits beyond the decimal point, round towards positive infinity + z := new(inf.Dec).QuoRound(x, y, 2, inf.RoundCeil) + fmt.Println(z) + // Output: -0.10 +} + +func ExampleDec_QuoExact_ok() { + // 1 / 25 is a finite decimal; it has an exact Dec representation + x, y := inf.NewDec(1, 0), inf.NewDec(25, 0) + z := new(inf.Dec).QuoExact(x, y) + fmt.Println(z) + // Output: 0.04 +} + +func ExampleDec_QuoExact_fail() { + // 1 / 3 is an infinite decimal; it has no exact Dec representation + x, y := inf.NewDec(1, 0), inf.NewDec(3, 0) + z := new(inf.Dec).QuoExact(x, y) + fmt.Println(z) + // Output: +} diff --git a/vendor/gopkg.in/inf.v0/rounder_example_test.go b/vendor/gopkg.in/inf.v0/rounder_example_test.go new file mode 100644 index 0000000000..803c1d7ee5 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/rounder_example_test.go @@ -0,0 +1,72 @@ +package inf_test + +import ( + "fmt" + "os" + "text/tabwriter" + + "gopkg.in/inf.v0" +) + +// This example displays the results of Dec.Round with each of the Rounders.
+// +func ExampleRounder() { + var vals = []struct { + x string + s inf.Scale + }{ + {"-0.18", 1}, {"-0.15", 1}, {"-0.12", 1}, {"-0.10", 1}, + {"-0.08", 1}, {"-0.05", 1}, {"-0.02", 1}, {"0.00", 1}, + {"0.02", 1}, {"0.05", 1}, {"0.08", 1}, {"0.10", 1}, + {"0.12", 1}, {"0.15", 1}, {"0.18", 1}, + } + + var rounders = []struct { + name string + rounder inf.Rounder + }{ + {"RoundDown", inf.RoundDown}, {"RoundUp", inf.RoundUp}, + {"RoundCeil", inf.RoundCeil}, {"RoundFloor", inf.RoundFloor}, + {"RoundHalfDown", inf.RoundHalfDown}, {"RoundHalfUp", inf.RoundHalfUp}, + {"RoundHalfEven", inf.RoundHalfEven}, {"RoundExact", inf.RoundExact}, + } + + fmt.Println("The results of new(inf.Dec).Round(x, s, inf.RoundXXX):\n") + w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight) + fmt.Fprint(w, "x\ts\t|\t") + for _, r := range rounders { + fmt.Fprintf(w, "%s\t", r.name[5:]) + } + fmt.Fprintln(w) + for _, v := range vals { + fmt.Fprintf(w, "%s\t%d\t|\t", v.x, v.s) + for _, r := range rounders { + x, _ := new(inf.Dec).SetString(v.x) + z := new(inf.Dec).Round(x, v.s, r.rounder) + fmt.Fprintf(w, "%d\t", z) + } + fmt.Fprintln(w) + } + w.Flush() + + // Output: + // The results of new(inf.Dec).Round(x, s, inf.RoundXXX): + // + // x s | Down Up Ceil Floor HalfDown HalfUp HalfEven Exact + // -0.18 1 | -0.1 -0.2 -0.1 -0.2 -0.2 -0.2 -0.2 + // -0.15 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.2 -0.2 + // -0.12 1 | -0.1 -0.2 -0.1 -0.2 -0.1 -0.1 -0.1 + // -0.10 1 | -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 -0.1 + // -0.08 1 | 0.0 -0.1 0.0 -0.1 -0.1 -0.1 -0.1 + // -0.05 1 | 0.0 -0.1 0.0 -0.1 0.0 -0.1 0.0 + // -0.02 1 | 0.0 -0.1 0.0 -0.1 0.0 0.0 0.0 + // 0.00 1 | 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 + // 0.02 1 | 0.0 0.1 0.1 0.0 0.0 0.0 0.0 + // 0.05 1 | 0.0 0.1 0.1 0.0 0.0 0.1 0.0 + // 0.08 1 | 0.0 0.1 0.1 0.0 0.1 0.1 0.1 + // 0.10 1 | 0.1 0.1 0.1 0.1 0.1 0.1 0.1 0.1 + // 0.12 1 | 0.1 0.2 0.2 0.1 0.1 0.1 0.1 + // 0.15 1 | 0.1 0.2 0.2 0.1 0.1 0.2 0.2 + // 0.18 1 | 0.1 0.2 0.2 0.1 0.2 0.2 0.2 + +} diff --git a/vendor/gopkg.in/inf.v0/rounder_test.go b/vendor/gopkg.in/inf.v0/rounder_test.go new file mode 100644 index 0000000000..d7e14c58c6 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/rounder_test.go @@ -0,0 +1,109 @@ +package inf_test + +import ( + "math/big" + "testing" + + "gopkg.in/inf.v0" +) + +var decRounderInputs = [...]struct { + quo *inf.Dec + rA, rB *big.Int +}{ + // examples from go language spec + {inf.NewDec(1, 0), big.NewInt(2), big.NewInt(3)}, // 5 / 3 + {inf.NewDec(-1, 0), big.NewInt(-2), big.NewInt(3)}, // -5 / 3 + {inf.NewDec(-1, 0), big.NewInt(2), big.NewInt(-3)}, // 5 / -3 + {inf.NewDec(1, 0), big.NewInt(-2), big.NewInt(-3)}, // -5 / -3 + // examples from godoc + {inf.NewDec(-1, 1), big.NewInt(-8), big.NewInt(10)}, + {inf.NewDec(-1, 1), big.NewInt(-5), big.NewInt(10)}, + {inf.NewDec(-1, 1), big.NewInt(-2), big.NewInt(10)}, + {inf.NewDec(0, 1), big.NewInt(-8), big.NewInt(10)}, + {inf.NewDec(0, 1), big.NewInt(-5), big.NewInt(10)}, + {inf.NewDec(0, 1), big.NewInt(-2), big.NewInt(10)}, + {inf.NewDec(0, 1), big.NewInt(0), big.NewInt(1)}, + {inf.NewDec(0, 1), big.NewInt(2), big.NewInt(10)}, + {inf.NewDec(0, 1), big.NewInt(5), big.NewInt(10)}, + {inf.NewDec(0, 1), big.NewInt(8), big.NewInt(10)}, + {inf.NewDec(1, 1), big.NewInt(2), big.NewInt(10)}, + {inf.NewDec(1, 1), big.NewInt(5), big.NewInt(10)}, + {inf.NewDec(1, 1), big.NewInt(8), big.NewInt(10)}, +} + +var decRounderResults = [...]struct { + rounder inf.Rounder + results [len(decRounderInputs)]*inf.Dec +}{ + {inf.RoundExact, [...]*inf.Dec{nil, nil, nil, nil, + nil, 
nil, nil, nil, nil, nil, + inf.NewDec(0, 1), nil, nil, nil, nil, nil, nil}}, + {inf.RoundDown, [...]*inf.Dec{ + inf.NewDec(1, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(1, 0), + inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1), + inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1), + inf.NewDec(0, 1), + inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1), + inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}}, + {inf.RoundUp, [...]*inf.Dec{ + inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0), + inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1), + inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1), + inf.NewDec(0, 1), + inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1), + inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}}, + {inf.RoundHalfDown, [...]*inf.Dec{ + inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0), + inf.NewDec(-2, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1), + inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1), + inf.NewDec(0, 1), + inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1), + inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(2, 1)}}, + {inf.RoundHalfUp, [...]*inf.Dec{ + inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0), + inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1), + inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(0, 1), + inf.NewDec(0, 1), + inf.NewDec(0, 1), inf.NewDec(1, 1), inf.NewDec(1, 1), + inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}}, + {inf.RoundHalfEven, [...]*inf.Dec{ + inf.NewDec(2, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(2, 0), + inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-1, 1), + inf.NewDec(-1, 1), inf.NewDec(0, 1), inf.NewDec(0, 1), + inf.NewDec(0, 1), + inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(1, 1), + inf.NewDec(1, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}}, + {inf.RoundFloor, [...]*inf.Dec{ + inf.NewDec(1, 0), inf.NewDec(-2, 0), inf.NewDec(-2, 0), inf.NewDec(1, 0), + inf.NewDec(-2, 1), inf.NewDec(-2, 1), inf.NewDec(-2, 1), + inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1), + inf.NewDec(0, 1), + inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1), + inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1)}}, + {inf.RoundCeil, [...]*inf.Dec{ + inf.NewDec(2, 0), inf.NewDec(-1, 0), inf.NewDec(-1, 0), inf.NewDec(2, 0), + inf.NewDec(-1, 1), inf.NewDec(-1, 1), inf.NewDec(-1, 1), + inf.NewDec(0, 1), inf.NewDec(0, 1), inf.NewDec(0, 1), + inf.NewDec(0, 1), + inf.NewDec(1, 1), inf.NewDec(1, 1), inf.NewDec(1, 1), + inf.NewDec(2, 1), inf.NewDec(2, 1), inf.NewDec(2, 1)}}, +} + +func TestDecRounders(t *testing.T) { + for i, a := range decRounderResults { + for j, input := range decRounderInputs { + q := new(inf.Dec).Set(input.quo) + rA, rB := new(big.Int).Set(input.rA), new(big.Int).Set(input.rB) + res := a.rounder.Round(new(inf.Dec), q, rA, rB) + if a.results[j] == nil && res == nil { + continue + } + if (a.results[j] == nil && res != nil) || + (a.results[j] != nil && res == nil) || + a.results[j].Cmp(res) != 0 { + t.Errorf("#%d,%d Rounder got %v; expected %v", i, j, res, a.results[j]) + } + } + } +} diff --git a/vendor/gopkg.in/ini.v1/.gitignore b/vendor/gopkg.in/ini.v1/.gitignore new file mode 100644 index 0000000000..c5203bf6e7 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.gitignore @@ -0,0 +1,5 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea diff --git a/vendor/gopkg.in/ini.v1/.travis.yml b/vendor/gopkg.in/ini.v1/.travis.yml new file mode 100644 
index 0000000000..0064ba1d7c --- /dev/null +++ b/vendor/gopkg.in/ini.v1/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go get -v github.com/smartystreets/goconvey + - go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/vendor/gopkg.in/ini.v1/ini_test.go b/vendor/gopkg.in/ini.v1/ini_test.go new file mode 100644 index 0000000000..66a5098324 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/ini_test.go @@ -0,0 +1,401 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +const _CONF_DATA = ` +; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +# Information about package author +# Bio can be written in multiple lines. +[author] +NAME = Unknwon ; Succeeding comment +E-MAIL = fake@localhost +GITHUB = https://github.com/%(NAME)s +BIO = """Gopher. +Coding addict. +Good man. +""" # Succeeding comment + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +UNUSED_KEY = should be deleted + +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values + +[types] +STRING = str +BOOL = true +BOOL_FALSE = false +FLOAT64 = 1.25 +INT = 10 +TIME = 2015-01-01T20:17:05Z +DURATION = 2h45m +UINT = 3 + +[array] +STRINGS = en, zh, de +FLOAT64S = 1.1, 2.2, 3.3 +INTS = 1, 2, 3 +UINTS = 1, 2, 3 +TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] +empty_lines = next line is empty\ + +; Comment before the section +[comments] ; This is a comment for the section too +; Comment before key +key = "value" +key2 = "value2" ; This is a comment for key2 +key3 = "one", "two", "three" + +[advance] +value with quotes = "some value" +value quote2 again = 'some value' +true = 2+3=5 +"1+1=2" = true +"""6+1=7""" = true +"""` + "`" + `5+5` + "`" + `""" = 10 +` + "`" + `"6+6"` + "`" + ` = 12 +` + "`" + `7-2=4` + "`" + ` = false +ADDRESS = ` + "`" + `404 road, +NotFound, State, 50000` + "`" + ` + +two_lines = how about \ + continuation lines? 
+lots_of_lines = 1 \ + 2 \ + 3 \ + 4 \ +` + +func Test_Load(t *testing.T) { + Convey("Load from data sources", t, func() { + + Convey("Load with empty data", func() { + So(Empty(), ShouldNotBeNil) + }) + + Convey("Load with multiple data sources", func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + f, err := Load([]byte(_CONF_DATA), "testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + }) + }) + + Convey("Bad load process", t, func() { + + Convey("Load from invalid data sources", func() { + _, err := Load(_CONF_DATA) + So(err, ShouldNotBeNil) + + f, err := Load("testdata/404.ini") + So(err, ShouldNotBeNil) + So(f, ShouldBeNil) + + _, err = Load(1) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(""), 1) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad section name", func() { + _, err := Load([]byte("[]")) + So(err, ShouldNotBeNil) + + _, err = Load([]byte("[")) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad keys", func() { + _, err := Load([]byte(`"""name`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`"""name"""`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`""=1`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`=`)) + So(err, ShouldNotBeNil) + + _, err = Load([]byte(`name`)) + So(err, ShouldNotBeNil) + }) + + Convey("Load with bad values", func() { + _, err := Load([]byte(`name="""Unknwon`)) + So(err, ShouldNotBeNil) + }) + }) + + Convey("Get section and key insensitively", t, func() { + cfg, err := InsensitiveLoad([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + sec, err := cfg.GetSection("Author") + So(err, ShouldBeNil) + So(sec, ShouldNotBeNil) + + key, err := sec.GetKey("E-mail") + So(err, ShouldBeNil) + So(key, ShouldNotBeNil) + }) + + Convey("Load with ignoring continuation lines", t, func() { + cfg, err := LoadSources(LoadOptions{IgnoreContinuation: true}, []byte(`key1=a\b\ +key2=c\d\`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("").Key("key1").String(), ShouldEqual, `a\b\`) + So(cfg.Section("").Key("key2").String(), ShouldEqual, `c\d\`) + }) + + Convey("Load with boolean type keys", t, func() { + cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, []byte(`key1=hello +key2`)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Section("").Key("key2").MustBool(false), ShouldBeTrue) + + var buf bytes.Buffer + cfg.WriteTo(&buf) + So(buf.String(), ShouldEqual, `key1 = hello +key2 +`) + }) +} + +func Test_LooseLoad(t *testing.T) { + Convey("Loose load from data sources", t, func() { + Convey("Loose load mixed with nonexistent file", func() { + cfg, err := LooseLoad("testdata/404.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + var fake struct { + Name string `ini:"name"` + } + So(cfg.MapTo(&fake), ShouldBeNil) + + cfg, err = LooseLoad([]byte("name=Unknwon"), "testdata/404.ini") + So(err, ShouldBeNil) + So(cfg.Section("").Key("name").String(), ShouldEqual, "Unknwon") + So(cfg.MapTo(&fake), ShouldBeNil) + So(fake.Name, ShouldEqual, "Unknwon") + }) + }) + +} + +func Test_File_Append(t *testing.T) { + Convey("Append data sources", t, func() { + cfg, err := Load([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.Append([]byte(""), []byte("")), ShouldBeNil) + + Convey("Append bad data sources", func() { + So(cfg.Append(1), ShouldNotBeNil) + So(cfg.Append([]byte(""), 1), ShouldNotBeNil) + }) + }) +} + +func Test_File_WriteTo(t *testing.T) { + 
Convey("Write to somewhere", t, func() { + var buf bytes.Buffer + cfg := Empty() + cfg.WriteTo(&buf) + }) +} + +func Test_File_SaveTo_WriteTo(t *testing.T) { + Convey("Save file", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.Section("").Key("NAME").Comment = "Package name" + cfg.Section("author").Comment = `Information about package author +# Bio can be written in multiple lines.` + cfg.Section("advanced").Key("val w/ pound").SetValue("my#password") + cfg.Section("advanced").Key("longest key has a colon : yes/no").SetValue("yes") + So(cfg.SaveTo("testdata/conf_out.ini"), ShouldBeNil) + + cfg.Section("author").Key("NAME").Comment = "This is author name" + + So(cfg.SaveToIndent("testdata/conf_out.ini", "\t"), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteToIndent(&buf, "\t") + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `; Package name +NAME = ini +; Package version +VERSION = v1 +; Package import path +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +; Information about package author +# Bio can be written in multiple lines. +[author] + ; This is author name + NAME = Unknwon + E-MAIL = u@gogs.io + GITHUB = https://github.com/%(NAME)s + # Succeeding comment + BIO = """Gopher. +Coding addict. +Good man. +""" + +[package] + CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] + UNUSED_KEY = should be deleted + +[features] + - = Support read/write comments of keys and sections + - = Support auto-increment of key names + - = Support load multiple files to overwrite key values + +[types] + STRING = str + BOOL = true + BOOL_FALSE = false + FLOAT64 = 1.25 + INT = 10 + TIME = 2015-01-01T20:17:05Z + DURATION = 2h45m + UINT = 3 + +[array] + STRINGS = en, zh, de + FLOAT64S = 1.1, 2.2, 3.3 + INTS = 1, 2, 3 + UINTS = 1, 2, 3 + TIMES = 2015-01-01T20:17:05Z,2015-01-01T20:17:05Z,2015-01-01T20:17:05Z + +[note] + empty_lines = next line is empty + +; Comment before the section +; This is a comment for the section too +[comments] + ; Comment before key + key = value + ; This is a comment for key2 + key2 = value2 + key3 = "one", "two", "three" + +[advance] + value with quotes = some value + value quote2 again = some value + true = 2+3=5 + `+"`"+`1+1=2`+"`"+` = true + `+"`"+`6+1=7`+"`"+` = true + """`+"`"+`5+5`+"`"+`""" = 10 + `+"`"+`"6+6"`+"`"+` = 12 + `+"`"+`7-2=4`+"`"+` = false + ADDRESS = """404 road, +NotFound, State, 50000""" + two_lines = how about continuation lines? + lots_of_lines = 1 2 3 4 + +[advanced] + val w/ pound = `+"`"+`my#password`+"`"+` + `+"`"+`longest key has a colon : yes/no`+"`"+` = yes + +`) + }) +} + +// Helpers for slice tests. 
+func float64sEqual(values []float64, expected ...float64) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) + } +} + +func intsEqual(values []int, expected ...int) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) + } +} + +func int64sEqual(values []int64, expected ...int64) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) + } +} + +func uintsEqual(values []uint, expected ...uint) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) + } +} + +func uint64sEqual(values []uint64, expected ...uint64) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i], ShouldEqual, v) + } +} + +func timesEqual(values []time.Time, expected ...time.Time) { + So(values, ShouldHaveLength, len(expected)) + for i, v := range expected { + So(values[i].String(), ShouldEqual, v.String()) + } +} diff --git a/vendor/gopkg.in/ini.v1/key_test.go b/vendor/gopkg.in/ini.v1/key_test.go new file mode 100644 index 0000000000..39c62104c4 --- /dev/null +++ b/vendor/gopkg.in/ini.v1/key_test.go @@ -0,0 +1,537 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strings" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Key(t *testing.T) { + Convey("Test getting and setting values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get values in default section", func() { + sec := cfg.Section("") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").Value(), ShouldEqual, "ini") + So(sec.Key("NAME").String(), ShouldEqual, "ini") + So(sec.Key("NAME").Validate(func(in string) string { + return in + }), ShouldEqual, "ini") + So(sec.Key("NAME").Comment, ShouldEqual, "; Package name") + So(sec.Key("IMPORT_PATH").String(), ShouldEqual, "gopkg.in/ini.v1") + }) + + Convey("Get values in non-default section", func() { + sec := cfg.Section("author") + So(sec, ShouldNotBeNil) + So(sec.Key("NAME").String(), ShouldEqual, "Unknwon") + So(sec.Key("GITHUB").String(), ShouldEqual, "https://github.com/Unknwon") + + sec = cfg.Section("package") + So(sec, ShouldNotBeNil) + So(sec.Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get auto-increment key names", func() { + keys := cfg.Section("features").Keys() + for i, k := range keys { + So(k.Name(), ShouldEqual, fmt.Sprintf("#%d", i+1)) + } + }) + + Convey("Get parent-keys that are available to the child section", func() { + parentKeys := cfg.Section("package.sub").ParentKeys() + for _, k := range parentKeys { + So(k.Name(), ShouldEqual, "CLONE_URL") + } + }) + + Convey("Get overwrite value", func() { + So(cfg.Section("author").Key("E-MAIL").String(), ShouldEqual, "u@gogs.io") + }) + + Convey("Get sections", func() { + sections := cfg.Sections() + for i, name := range []string{DEFAULT_SECTION, "author", "package", "package.sub", "features", "types", "array", "note", "comments", "advance"} { + So(sections[i].Name(), ShouldEqual, name) + } + }) + + Convey("Get parent section value", func() { + So(cfg.Section("package.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + So(cfg.Section("package.fake.sub").Key("CLONE_URL").String(), ShouldEqual, "https://gopkg.in/ini.v1") + }) + + Convey("Get multiple line value", func() { + So(cfg.Section("author").Key("BIO").String(), ShouldEqual, "Gopher.\nCoding addict.\nGood man.\n") + }) + + Convey("Get values with type", func() { + sec := cfg.Section("types") + v1, err := sec.Key("BOOL").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeTrue) + + v1, err = sec.Key("BOOL_FALSE").Bool() + So(err, ShouldBeNil) + So(v1, ShouldBeFalse) + + v2, err := sec.Key("FLOAT64").Float64() + So(err, ShouldBeNil) + So(v2, ShouldEqual, 1.25) + + v3, err := sec.Key("INT").Int() + So(err, ShouldBeNil) + So(v3, ShouldEqual, 10) + + v4, err := sec.Key("INT").Int64() + So(err, ShouldBeNil) + So(v4, ShouldEqual, 10) + + v5, err := sec.Key("UINT").Uint() + So(err, ShouldBeNil) + So(v5, ShouldEqual, 3) + + v6, err := sec.Key("UINT").Uint64() + So(err, ShouldBeNil) + So(v6, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + v7, err := sec.Key("TIME").Time() + So(err, ShouldBeNil) + So(v7.String(), ShouldEqual, t.String()) + + Convey("Must get values with type", func() { + So(sec.Key("STRING").MustString("404"), ShouldEqual, "str") + So(sec.Key("BOOL").MustBool(), ShouldBeTrue) + So(sec.Key("FLOAT64").MustFloat64(), ShouldEqual, 1.25) + So(sec.Key("INT").MustInt(), ShouldEqual, 10) + So(sec.Key("INT").MustInt64(), ShouldEqual, 10) + So(sec.Key("UINT").MustUint(), ShouldEqual, 3) + So(sec.Key("UINT").MustUint64(), ShouldEqual, 3) + 
So(sec.Key("TIME").MustTime().String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(sec.Key("DURATION").MustDuration().Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must get values with default value", func() { + So(sec.Key("STRING_404").MustString("404"), ShouldEqual, "404") + So(sec.Key("BOOL_404").MustBool(true), ShouldBeTrue) + So(sec.Key("FLOAT64_404").MustFloat64(2.5), ShouldEqual, 2.5) + So(sec.Key("INT_404").MustInt(15), ShouldEqual, 15) + So(sec.Key("INT64_404").MustInt64(15), ShouldEqual, 15) + So(sec.Key("UINT_404").MustUint(6), ShouldEqual, 6) + So(sec.Key("UINT64_404").MustUint64(6), ShouldEqual, 6) + + t, err := time.Parse(time.RFC3339, "2014-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME_404").MustTime(t).String(), ShouldEqual, t.String()) + + So(sec.Key("DURATION_404").MustDuration(dur).Seconds(), ShouldEqual, dur.Seconds()) + + Convey("Must should set default as key value", func() { + So(sec.Key("STRING_404").String(), ShouldEqual, "404") + So(sec.Key("BOOL_404").String(), ShouldEqual, "true") + So(sec.Key("FLOAT64_404").String(), ShouldEqual, "2.5") + So(sec.Key("INT_404").String(), ShouldEqual, "15") + So(sec.Key("INT64_404").String(), ShouldEqual, "15") + So(sec.Key("UINT_404").String(), ShouldEqual, "6") + So(sec.Key("UINT64_404").String(), ShouldEqual, "6") + So(sec.Key("TIME_404").String(), ShouldEqual, "2014-01-01T20:17:05Z") + So(sec.Key("DURATION_404").String(), ShouldEqual, "2h45m0s") + }) + }) + }) + }) + + Convey("Get value with candidates", func() { + sec := cfg.Section("types") + So(sec.Key("STRING").In("", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64").InFloat64(0, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT").InInt(0, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT").InInt64(0, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT").InUint(0, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT").InUint64(0, []uint64{3, 6, 9}), ShouldEqual, 3) + + zt, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").InTime(zt, []time.Time{t, time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + + Convey("Get value with candidates and default value", func() { + So(sec.Key("STRING_404").In("str", []string{"str", "arr", "types"}), ShouldEqual, "str") + So(sec.Key("FLOAT64_404").InFloat64(1.25, []float64{1.25, 2.5, 3.75}), ShouldEqual, 1.25) + So(sec.Key("INT_404").InInt(10, []int{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("INT64_404").InInt64(10, []int64{10, 20, 30}), ShouldEqual, 10) + So(sec.Key("UINT_404").InUint(3, []uint{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("UINT_404").InUint64(3, []uint64{3, 6, 9}), ShouldEqual, 3) + So(sec.Key("TIME_404").InTime(t, []time.Time{time.Now(), time.Now(), time.Now().Add(1 * time.Second)}).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values in range", func() { + sec := cfg.Section("types") + So(sec.Key("FLOAT64").RangeFloat64(0, 1, 2), ShouldEqual, 1.25) + So(sec.Key("INT").RangeInt(0, 10, 20), ShouldEqual, 10) + So(sec.Key("INT").RangeInt64(0, 10, 20), ShouldEqual, 10) + + minT, err := time.Parse(time.RFC3339, "0001-01-01T01:00:00Z") + So(err, ShouldBeNil) + midT, err := time.Parse(time.RFC3339, "2013-01-01T01:00:00Z") + So(err, ShouldBeNil) + maxT, err := time.Parse(time.RFC3339, "9999-01-01T01:00:00Z") + So(err, ShouldBeNil) + 
t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + So(sec.Key("TIME").RangeTime(t, minT, maxT).String(), ShouldEqual, t.String()) + + Convey("Get value in range with default value", func() { + So(sec.Key("FLOAT64").RangeFloat64(5, 0, 1), ShouldEqual, 5) + So(sec.Key("INT").RangeInt(7, 0, 5), ShouldEqual, 7) + So(sec.Key("INT").RangeInt64(7, 0, 5), ShouldEqual, 7) + So(sec.Key("TIME").RangeTime(t, minT, midT).String(), ShouldEqual, t.String()) + }) + }) + + Convey("Get values into slice", func() { + sec := cfg.Section("array") + So(strings.Join(sec.Key("STRINGS").Strings(","), ","), ShouldEqual, "en,zh,de") + So(len(sec.Key("STRINGS_404").Strings(",")), ShouldEqual, 0) + + vals1 := sec.Key("FLOAT64S").Float64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").Ints(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").Int64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").Uints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").Uint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").Times(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get valid values into slice", func() { + sec := cfg.Section("array") + vals1 := sec.Key("FLOAT64S").ValidFloat64s(",") + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2 := sec.Key("INTS").ValidInts(",") + intsEqual(vals2, 1, 2, 3) + + vals3 := sec.Key("INTS").ValidInt64s(",") + int64sEqual(vals3, 1, 2, 3) + + vals4 := sec.Key("UINTS").ValidUints(",") + uintsEqual(vals4, 1, 2, 3) + + vals5 := sec.Key("UINTS").ValidUint64s(",") + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6 := sec.Key("TIMES").ValidTimes(",") + timesEqual(vals6, t, t, t) + }) + + Convey("Get values one type into slice of another type", func() { + sec := cfg.Section("array") + vals1 := sec.Key("STRINGS").ValidFloat64s(",") + So(vals1, ShouldBeEmpty) + + vals2 := sec.Key("STRINGS").ValidInts(",") + So(vals2, ShouldBeEmpty) + + vals3 := sec.Key("STRINGS").ValidInt64s(",") + So(vals3, ShouldBeEmpty) + + vals4 := sec.Key("STRINGS").ValidUints(",") + So(vals4, ShouldBeEmpty) + + vals5 := sec.Key("STRINGS").ValidUint64s(",") + So(vals5, ShouldBeEmpty) + + vals6 := sec.Key("STRINGS").ValidTimes(",") + So(vals6, ShouldBeEmpty) + }) + + Convey("Get valid values into slice without errors", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("FLOAT64S").StrictFloat64s(",") + So(err, ShouldBeNil) + float64sEqual(vals1, 1.1, 2.2, 3.3) + + vals2, err := sec.Key("INTS").StrictInts(",") + So(err, ShouldBeNil) + intsEqual(vals2, 1, 2, 3) + + vals3, err := sec.Key("INTS").StrictInt64s(",") + So(err, ShouldBeNil) + int64sEqual(vals3, 1, 2, 3) + + vals4, err := sec.Key("UINTS").StrictUints(",") + So(err, ShouldBeNil) + uintsEqual(vals4, 1, 2, 3) + + vals5, err := sec.Key("UINTS").StrictUint64s(",") + So(err, ShouldBeNil) + uint64sEqual(vals5, 1, 2, 3) + + t, err := time.Parse(time.RFC3339, "2015-01-01T20:17:05Z") + So(err, ShouldBeNil) + vals6, err := sec.Key("TIMES").StrictTimes(",") + So(err, ShouldBeNil) + timesEqual(vals6, t, t, t) + }) + + Convey("Get invalid values into slice", func() { + sec := cfg.Section("array") + vals1, err := sec.Key("STRINGS").StrictFloat64s(",") + So(vals1, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals2, err := sec.Key("STRINGS").StrictInts(",") + So(vals2, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + 
vals3, err := sec.Key("STRINGS").StrictInt64s(",") + So(vals3, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals4, err := sec.Key("STRINGS").StrictUints(",") + So(vals4, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals5, err := sec.Key("STRINGS").StrictUint64s(",") + So(vals5, ShouldBeEmpty) + So(err, ShouldNotBeNil) + + vals6, err := sec.Key("STRINGS").StrictTimes(",") + So(vals6, ShouldBeEmpty) + So(err, ShouldNotBeNil) + }) + + Convey("Get key hash", func() { + cfg.Section("").KeysHash() + }) + + Convey("Set key value", func() { + k := cfg.Section("author").Key("NAME") + k.SetValue("无闻") + So(k.String(), ShouldEqual, "无闻") + }) + + Convey("Get key strings", func() { + So(strings.Join(cfg.Section("types").KeyStrings(), ","), ShouldEqual, "STRING,BOOL,BOOL_FALSE,FLOAT64,INT,TIME,DURATION,UINT") + }) + + Convey("Delete a key", func() { + cfg.Section("package.sub").DeleteKey("UNUSED_KEY") + _, err := cfg.Section("package.sub").GetKey("UNUSED_KEY") + So(err, ShouldNotBeNil) + }) + + Convey("Has Key (backwards compatible)", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.Haskey("UNUSED_KEY") + haskey2 := sec.Haskey("CLONE_URL") + haskey3 := sec.Haskey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Key", func() { + sec := cfg.Section("package.sub") + haskey1 := sec.HasKey("UNUSED_KEY") + haskey2 := sec.HasKey("CLONE_URL") + haskey3 := sec.HasKey("CLONE_URL_NO") + So(haskey1, ShouldBeTrue) + So(haskey2, ShouldBeTrue) + So(haskey3, ShouldBeFalse) + }) + + Convey("Has Value", func() { + sec := cfg.Section("author") + hasvalue1 := sec.HasValue("Unknwon") + hasvalue2 := sec.HasValue("doc") + So(hasvalue1, ShouldBeTrue) + So(hasvalue2, ShouldBeFalse) + }) + }) + + Convey("Test getting and setting bad values", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Create new key with empty name", func() { + k, err := cfg.Section("").NewKey("", "") + So(err, ShouldNotBeNil) + So(k, ShouldBeNil) + }) + + Convey("Create new section with empty name", func() { + s, err := cfg.NewSection("") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + }) + + Convey("Create new sections with empty name", func() { + So(cfg.NewSections(""), ShouldNotBeNil) + }) + + Convey("Get section that does not exist", func() { + s, err := cfg.GetSection("404") + So(err, ShouldNotBeNil) + So(s, ShouldBeNil) + + s = cfg.Section("404") + So(s, ShouldNotBeNil) + }) + }) + + Convey("Test key hash clone", t, func() { + cfg, err := Load([]byte(strings.Replace("network=tcp,addr=127.0.0.1:6379,db=4,pool_size=100,idle_timeout=180", ",", "\n", -1))) + So(err, ShouldBeNil) + for _, v := range cfg.Section("").KeysHash() { + So(len(v), ShouldBeGreaterThan, 0) + } + }) + + Convey("Key has empty value", t, func() { + _conf := `key1= +key2= ; comment` + cfg, err := Load([]byte(_conf)) + So(err, ShouldBeNil) + So(cfg.Section("").Key("key1").Value(), ShouldBeEmpty) + }) +} + +func newTestFile(block bool) *File { + c, _ := Load([]byte(_CONF_DATA)) + c.BlockMode = block + return c +} + +func Benchmark_Key_Value(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i <
b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").Value() + } +} + +func Benchmark_Key_Value_Direct(b *testing.B) { + c := newTestFile(true) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_Value_Direct_NonBlock(b *testing.B) { + c := newTestFile(false) + key := c.Section("").Key("NAME") + for i := 0; i < b.N; i++ { + key.Value() + } +} + +func Benchmark_Key_String(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_NonBlock(b *testing.B) { + c := newTestFile(false) + for i := 0; i < b.N; i++ { + _ = c.Section("").Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_String_ViaSection_NonBlock(b *testing.B) { + c := newTestFile(false) + sec := c.Section("") + for i := 0; i < b.N; i++ { + _ = sec.Key("NAME").String() + } +} + +func Benchmark_Key_SetValue(b *testing.B) { + c := newTestFile(true) + for i := 0; i < b.N; i++ { + c.Section("").Key("NAME").SetValue("10") + } +} + +func Benchmark_Key_SetValue_VisSection(b *testing.B) { + c := newTestFile(true) + sec := c.Section("") + for i := 0; i < b.N; i++ { + sec.Key("NAME").SetValue("10") + } +} diff --git a/vendor/gopkg.in/ini.v1/section_test.go b/vendor/gopkg.in/ini.v1/section_test.go new file mode 100644 index 0000000000..c6efb2c45a --- /dev/null +++ b/vendor/gopkg.in/ini.v1/section_test.go @@ -0,0 +1,47 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "strings" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Section(t *testing.T) { + Convey("Test CRD sections", t, func() { + cfg, err := Load([]byte(_CONF_DATA), "testdata/conf.ini") + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + Convey("Get section strings", func() { + So(strings.Join(cfg.SectionStrings(), ","), ShouldEqual, "DEFAULT,author,package,package.sub,features,types,array,note,comments,advance") + }) + + Convey("Delete a section", func() { + cfg.DeleteSection("") + So(cfg.SectionStrings()[0], ShouldNotEqual, DEFAULT_SECTION) + }) + + Convey("Create new sections", func() { + cfg.NewSections("test", "test2") + _, err := cfg.GetSection("test") + So(err, ShouldBeNil) + _, err = cfg.GetSection("test2") + So(err, ShouldBeNil) + }) + }) +} diff --git a/vendor/gopkg.in/ini.v1/struct_test.go b/vendor/gopkg.in/ini.v1/struct_test.go new file mode 100644 index 0000000000..b1515d9f8e --- /dev/null +++ b/vendor/gopkg.in/ini.v1/struct_test.go @@ -0,0 +1,323 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "fmt" + "strings" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type testNested struct { + Cities []string `delim:"|"` + Visits []time.Time + Years []int + Numbers []int64 + Ages []uint + Populations []uint64 + Coordinates []float64 + Note string + Unused int `ini:"-"` +} + +type testEmbeded struct { + GPA float64 +} + +type testStruct struct { + Name string `ini:"NAME"` + Age int + Male bool + Money float64 + Born time.Time + Time time.Duration `ini:"Duration"` + Others testNested + *testEmbeded `ini:"grade"` + Unused int `ini:"-"` + Unsigned uint + Omitted bool `ini:"omitthis,omitempty"` +} + +const _CONF_DATA_STRUCT = ` +NAME = Unknwon +Age = 21 +Male = true +Money = 1.25 +Born = 1993-10-07T20:17:05Z +Duration = 2h45m +Unsigned = 3 +omitthis = true + +[Others] +Cities = HangZhou|Boston +Visits = 1993-10-07T20:17:05Z, 1993-10-07T20:17:05Z +Years = 1993,1994 +Numbers = 10010,10086 +Ages = 18,19 +Populations = 12345678,98765432 +Coordinates = 192.168,10.11 +Note = Hello world! + +[grade] +GPA = 2.8 + +[foo.bar] +Here = there +When = then +` + +type unsupport struct { + Byte byte +} + +type unsupport2 struct { + Others struct { + Cities byte + } +} + +type unsupport3 struct { + Cities byte +} + +type unsupport4 struct { + *unsupport3 `ini:"Others"` +} + +type defaultValue struct { + Name string + Age int + Male bool + Money float64 + Born time.Time + Cities []string +} + +type fooBar struct { + Here, When string +} + +const _INVALID_DATA_CONF_STRUCT = ` +Name = +Age = age +Male = 123 +Money = money +Born = nil +Cities = +` + +func Test_Struct(t *testing.T) { + Convey("Map to struct", t, func() { + Convey("Map file to struct", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Name, ShouldEqual, "Unknwon") + So(ts.Age, ShouldEqual, 21) + So(ts.Male, ShouldBeTrue) + So(ts.Money, ShouldEqual, 1.25) + So(ts.Unsigned, ShouldEqual, 3) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + So(ts.Born.String(), ShouldEqual, t.String()) + + dur, err := time.ParseDuration("2h45m") + So(err, ShouldBeNil) + So(ts.Time.Seconds(), ShouldEqual, dur.Seconds()) + + So(strings.Join(ts.Others.Cities, ","), ShouldEqual, "HangZhou,Boston") + So(ts.Others.Visits[0].String(), ShouldEqual, t.String()) + So(fmt.Sprint(ts.Others.Years), ShouldEqual, "[1993 1994]") + So(fmt.Sprint(ts.Others.Numbers), ShouldEqual, "[10010 10086]") + So(fmt.Sprint(ts.Others.Ages), ShouldEqual, "[18 19]") + So(fmt.Sprint(ts.Others.Populations), ShouldEqual, "[12345678 98765432]") + So(fmt.Sprint(ts.Others.Coordinates), ShouldEqual, "[192.168 10.11]") + So(ts.Others.Note, ShouldEqual, "Hello world!") + So(ts.testEmbeded.GPA, ShouldEqual, 2.8) + }) + + Convey("Map section to struct", func() { + foobar := new(fooBar) + f, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + + So(f.Section("foo.bar").MapTo(foobar), ShouldBeNil) + So(foobar.Here, ShouldEqual, "there") + So(foobar.When, ShouldEqual, "then") + }) + + Convey("Map to non-pointer struct", func() { + cfg, 
err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + So(cfg.MapTo(testStruct{}), ShouldNotBeNil) + }) + + Convey("Map to unsupported type", func() { + cfg, err := Load([]byte(_CONF_DATA_STRUCT)) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = func(raw string) string { + if raw == "Byte" { + return "NAME" + } + return raw + } + So(cfg.MapTo(&unsupport{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport2{}), ShouldNotBeNil) + So(cfg.MapTo(&unsupport4{}), ShouldNotBeNil) + }) + + Convey("Map to omitempty field", func() { + ts := new(testStruct) + So(MapTo(ts, []byte(_CONF_DATA_STRUCT)), ShouldBeNil) + + So(ts.Omitted, ShouldEqual, true) + }) + + Convey("Map from invalid data source", func() { + So(MapTo(&testStruct{}, "hi"), ShouldNotBeNil) + }) + + Convey("Map to wrong types and gain default values", func() { + cfg, err := Load([]byte(_INVALID_DATA_CONF_STRUCT)) + So(err, ShouldBeNil) + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + dv := &defaultValue{"Joe", 10, true, 1.25, t, []string{"HangZhou", "Boston"}} + So(cfg.MapTo(dv), ShouldBeNil) + So(dv.Name, ShouldEqual, "Joe") + So(dv.Age, ShouldEqual, 10) + So(dv.Male, ShouldBeTrue) + So(dv.Money, ShouldEqual, 1.25) + So(dv.Born.String(), ShouldEqual, t.String()) + So(strings.Join(dv.Cities, ","), ShouldEqual, "HangZhou,Boston") + }) + }) + + Convey("Reflect from struct", t, func() { + type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string + Years []int + Numbers []int64 + Ages []uint + Populations []uint64 + Coordinates []float64 + None []int + } + type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + Height uint + GPA float64 + Date time.Time + NeverMind string `ini:"-"` + *Embeded `ini:"infos"` + } + + t, err := time.Parse(time.RFC3339, "1993-10-07T20:17:05Z") + So(err, ShouldBeNil) + a := &Author{"Unknwon", true, 21, 100, 2.8, t, "", + &Embeded{ + []time.Time{t, t}, + []string{"HangZhou", "Boston"}, + []int{1993, 1994}, + []int64{10010, 10086}, + []uint{18, 19}, + []uint64{12345678, 98765432}, + []float64{192.168, 10.11}, + []int{}, + }} + cfg := Empty() + So(ReflectFrom(cfg, a), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteTo(&buf) + So(err, ShouldBeNil) + So(buf.String(), ShouldEqual, `NAME = Unknwon +Male = true +Age = 21 +Height = 100 +GPA = 2.8 +Date = 1993-10-07T20:17:05Z + +[infos] +Dates = 1993-10-07T20:17:05Z|1993-10-07T20:17:05Z +Places = HangZhou,Boston +Years = 1993,1994 +Numbers = 10010,10086 +Ages = 18,19 +Populations = 12345678,98765432 +Coordinates = 192.168,10.11 +None = + +`) + + Convey("Reflect from non-pointer struct", func() { + So(ReflectFrom(cfg, Author{}), ShouldNotBeNil) + }) + + Convey("Reflect from struct with omitempty", func() { + cfg := Empty() + type SpecialStruct struct { + FirstName string `ini:"first_name"` + LastName string `ini:"last_name"` + JustOmitMe string `ini:"omitempty"` + LastLogin time.Time `ini:"last_login,omitempty"` + LastLogin2 time.Time `ini:",omitempty"` + NotEmpty int `ini:"omitempty"` + } + + So(ReflectFrom(cfg, &SpecialStruct{FirstName: "John", LastName: "Doe", NotEmpty: 9}), ShouldBeNil) + + var buf bytes.Buffer + _, err = cfg.WriteTo(&buf) + So(buf.String(), ShouldEqual, `first_name = John +last_name = Doe +omitempty = 9 + +`) + }) + }) +} + +type testMapper struct { + PackageName string +} + +func Test_NameGetter(t *testing.T) { + Convey("Test name mappers", t, func() { + So(MapToWithMapper(&testMapper{}, TitleUnderscore,
[]byte("packag_name=ini")), ShouldBeNil) + + cfg, err := Load([]byte("PACKAGE_NAME=ini")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + + cfg.NameMapper = AllCapsUnderscore + tg := new(testMapper) + So(cfg.MapTo(tg), ShouldBeNil) + So(tg.PackageName, ShouldEqual, "ini") + }) +} diff --git a/vendor/gopkg.in/macaron.v1/.gitignore b/vendor/gopkg.in/macaron.v1/.gitignore new file mode 100644 index 0000000000..fc5aca3e47 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/.gitignore @@ -0,0 +1,3 @@ +macaron.sublime-project +macaron.sublime-workspace +.idea diff --git a/vendor/gopkg.in/macaron.v1/.travis.yml b/vendor/gopkg.in/macaron.v1/.travis.yml new file mode 100644 index 0000000000..bdd2641e30 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/.travis.yml @@ -0,0 +1,13 @@ +sudo: false +language: go + +go: + - 1.5 + - 1.6 + - tip + +script: go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/vendor/gopkg.in/macaron.v1/context_test.go b/vendor/gopkg.in/macaron.v1/context_test.go new file mode 100644 index 0000000000..8331b2bb0b --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/context_test.go @@ -0,0 +1,375 @@ +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/Unknwon/com" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Context(t *testing.T) { + Convey("Do advanced encapsulation operations", t, func() { + m := Classic() + m.Use(Renderers(RenderOptions{ + Directory: "fixtures/basic", + }, "fixtures/basic2")) + + Convey("Get request body", func() { + m.Get("/body1", func(ctx *Context) { + data, err := ioutil.ReadAll(ctx.Req.Body().ReadCloser()) + So(err, ShouldBeNil) + So(string(data), ShouldEqual, "This is my request body") + }) + m.Get("/body2", func(ctx *Context) { + data, err := ctx.Req.Body().Bytes() + So(err, ShouldBeNil) + So(string(data), ShouldEqual, "This is my request body") + }) + m.Get("/body3", func(ctx *Context) { + data, err := ctx.Req.Body().String() + So(err, ShouldBeNil) + So(data, ShouldEqual, "This is my request body") + }) + + for i := 1; i <= 3; i++ { + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/body"+com.ToStr(i), nil) + req.Body = ioutil.NopCloser(bytes.NewBufferString("This is my request body")) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + } + }) + + Convey("Get remote IP address", func() { + m.Get("/remoteaddr", func(ctx *Context) string { + return ctx.RemoteAddr() + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/remoteaddr", nil) + req.RemoteAddr = "127.0.0.1:3333" + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "127.0.0.1") + }) + + Convey("Render HTML", func() { + + Convey("Normal HTML", func() { + m.Get("/html", func(ctx *Context) { + ctx.HTML(304, "hello", "Unknwon") // 304 for logger test. 
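+ // Editor's note, not part of the vendored file: ctx.HTML(status, name, data)
+ // renders name+".tmpl" from the template directory configured via Renderers
+ // above. A minimal sketch of the same call outside these test fixtures (the
+ // "templates" directory and "hello" template are assumed):
+ //
+ //	m := macaron.Classic()
+ //	m.Use(macaron.Renderer(macaron.RenderOptions{Directory: "templates"}))
+ //	m.Get("/", func(ctx *macaron.Context) {
+ //		ctx.HTML(200, "hello", "world") // renders templates/hello.tmpl with data "world"
+ //	})
+ //	m.Run()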
+ }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/html", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "

<h1>Hello Unknwon</h1>

") + }) + + Convey("HTML template set", func() { + m.Get("/html2", func(ctx *Context) { + ctx.Data["Name"] = "Unknwon" + ctx.HTMLSet(200, "basic2", "hello2") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/html2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "

<h1>Hello Unknwon</h1>

") + }) + + Convey("With layout", func() { + m.Get("/layout", func(ctx *Context) { + ctx.HTML(200, "hello", "Unknwon", HTMLOptions{"layout"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/layout", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "head

<h1>Hello Unknwon</h1>

foot") + }) + }) + + Convey("Parse from and query", func() { + m.Get("/query", func(ctx *Context) string { + var buf bytes.Buffer + buf.WriteString(ctx.QueryTrim("name") + " ") + buf.WriteString(ctx.QueryEscape("name") + " ") + buf.WriteString(com.ToStr(ctx.QueryBool("bool")) + " ") + buf.WriteString(com.ToStr(ctx.QueryInt("int")) + " ") + buf.WriteString(com.ToStr(ctx.QueryInt64("int64")) + " ") + buf.WriteString(com.ToStr(ctx.QueryFloat64("float64")) + " ") + return buf.String() + }) + m.Get("/query2", func(ctx *Context) string { + var buf bytes.Buffer + buf.WriteString(strings.Join(ctx.QueryStrings("list"), ",") + " ") + buf.WriteString(strings.Join(ctx.QueryStrings("404"), ",") + " ") + return buf.String() + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/query?name=Unknwon&bool=t&int=12&int64=123&float64=1.25", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Unknwon Unknwon true 12 123 1.25 ") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/query2?list=item1&list=item2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "item1,item2 ") + }) + + Convey("URL parameter", func() { + m.Get("/:name/:int/:int64/:float64", func(ctx *Context) string { + var buf bytes.Buffer + ctx.SetParams("name", ctx.Params("name")) + buf.WriteString(ctx.Params("")) + buf.WriteString(ctx.Params(":name") + " ") + buf.WriteString(ctx.ParamsEscape(":name") + " ") + buf.WriteString(com.ToStr(ctx.ParamsInt(":int")) + " ") + buf.WriteString(com.ToStr(ctx.ParamsInt64(":int64")) + " ") + buf.WriteString(com.ToStr(ctx.ParamsFloat64(":float64")) + " ") + return buf.String() + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/user/1/13/1.24", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "user user 1 13 1.24 ") + }) + + Convey("Get file", func() { + m.Post("/getfile", func(ctx *Context) { + ctx.Query("") + ctx.GetFile("hi") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("POST", "/getfile", nil) + So(err, ShouldBeNil) + req.Header.Set("Content-Type", "multipart/form-data") + m.ServeHTTP(resp, req) + }) + + Convey("Set and get cookie", func() { + m.Get("/set", func(ctx *Context) { + t, err := time.Parse(time.RFC1123, "Sun, 13 Mar 2016 01:29:26 UTC") + So(err, ShouldBeNil) + ctx.SetCookie("user", "Unknwon", 1, "/", "localhost", true, true, t) + ctx.SetCookie("user", "Unknwon", int32(1), "/", "localhost", 1) + ctx.SetCookie("user", "Unknwon", int64(1)) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/set", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Header().Get("Set-Cookie"), ShouldEqual, "user=Unknwon; Path=/; Domain=localhost; Expires=Sun, 13 Mar 2016 01:29:26 GMT; Max-Age=1; HttpOnly; Secure") + + m.Get("/get", func(ctx *Context) string { + ctx.GetCookie("404") + So(ctx.GetCookieInt("uid"), ShouldEqual, 1) + So(ctx.GetCookieInt64("uid"), ShouldEqual, 1) + So(ctx.GetCookieFloat64("balance"), ShouldEqual, 1.25) + return ctx.GetCookie("user") + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", "user=Unknwon; uid=1; balance=1.25") + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Unknwon") + }) + + Convey("Set and get secure cookie", func() { + m.SetDefaultCookieSecret("macaron") + m.Get("/set", func(ctx *Context) { + ctx.SetSecureCookie("user", 
"Unknwon", 1) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/set", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + cookie := resp.Header().Get("Set-Cookie") + + m.Get("/get", func(ctx *Context) string { + name, ok := ctx.GetSecureCookie("user") + So(ok, ShouldBeTrue) + return name + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + req.Header.Set("Cookie", cookie) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Unknwon") + }) + + Convey("Serve files", func() { + m.Get("/file", func(ctx *Context) { + ctx.ServeFile("fixtures/custom_funcs/index.tmpl") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/file", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") + + m.Get("/file2", func(ctx *Context) { + ctx.ServeFile("fixtures/custom_funcs/index.tmpl", "ok.tmpl") + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/file2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") + }) + + Convey("Serve file content", func() { + m.Get("/file", func(ctx *Context) { + ctx.ServeFileContent("fixtures/custom_funcs/index.tmpl") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/file", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") + + m.Get("/file2", func(ctx *Context) { + ctx.ServeFileContent("fixtures/custom_funcs/index.tmpl", "ok.tmpl") + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/file2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "{{ myCustomFunc }}") + + m.Get("/file3", func(ctx *Context) { + ctx.ServeFileContent("404.tmpl") + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/file3", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "open 404.tmpl: no such file or directory\n") + So(resp.Code, ShouldEqual, 500) + }) + + Convey("Serve content", func() { + m.Get("/content", func(ctx *Context) { + ctx.ServeContent("content1", bytes.NewReader([]byte("Hello world!"))) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/content", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Hello world!") + + m.Get("/content2", func(ctx *Context) { + ctx.ServeContent("content1", bytes.NewReader([]byte("Hello world!")), time.Now()) + }) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/content2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Hello world!") + }) + }) +} + +func Test_Context_Render(t *testing.T) { + Convey("Invalid render", t, func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + + m := New() + m.Get("/", func(ctx *Context) { + ctx.HTML(200, "hey") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) +} + +func Test_Context_Redirect(t *testing.T) { + Convey("Context with default redirect", t, func() { + url, err := url.Parse("http://localhost/path/one") + So(err, ShouldBeNil) + resp := httptest.NewRecorder() + req := http.Request{ + Method: "GET", + URL: url, + } + ctx := &Context{ + Req: Request{&req}, + Resp: NewResponseWriter(resp), + Data: 
make(map[string]interface{}), + } + ctx.Redirect("two") + + So(resp.Code, ShouldEqual, http.StatusFound) + So(resp.HeaderMap["Location"][0], ShouldEqual, "/path/two") + }) + + Convey("Context with custom redirect", t, func() { + url, err := url.Parse("http://localhost/path/one") + So(err, ShouldBeNil) + resp := httptest.NewRecorder() + req := http.Request{ + Method: "GET", + URL: url, + } + ctx := &Context{ + Req: Request{&req}, + Resp: NewResponseWriter(resp), + Data: make(map[string]interface{}), + } + ctx.Redirect("two", 307) + + So(resp.Code, ShouldEqual, http.StatusTemporaryRedirect) + So(resp.HeaderMap["Location"][0], ShouldEqual, "/path/two") + }) +} diff --git a/vendor/gopkg.in/macaron.v1/logger_test.go b/vendor/gopkg.in/macaron.v1/logger_test.go new file mode 100644 index 0000000000..def86810a9 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/logger_test.go @@ -0,0 +1,67 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "bytes" + "log" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Unknwon/com" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Logger(t *testing.T) { + Convey("Global logger", t, func() { + buf := bytes.NewBufferString("") + m := New() + m.Map(log.New(buf, "[Macaron] ", 0)) + m.Use(Logger()) + m.Use(func(res http.ResponseWriter) { + res.WriteHeader(http.StatusNotFound) + }) + m.Get("/", func() {}) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusNotFound) + So(len(buf.String()), ShouldBeGreaterThan, 0) + }) + + if ColorLog { + Convey("Color console output", t, func() { + m := Classic() + m.Get("/:code:int", func(ctx *Context) (int, string) { + return ctx.ParamsInt(":code"), "" + }) + + // Just for testing if logger would capture. + codes := []int{200, 201, 202, 301, 302, 304, 401, 403, 404, 500} + for _, code := range codes { + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/"+com.ToStr(code), nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, code) + } + }) + } +} diff --git a/vendor/gopkg.in/macaron.v1/macaron_test.go b/vendor/gopkg.in/macaron.v1/macaron_test.go new file mode 100644 index 0000000000..5aee0c50fc --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/macaron_test.go @@ -0,0 +1,218 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Version(t *testing.T) { + Convey("Get version", t, func() { + So(Version(), ShouldEqual, _VERSION) + }) +} + +func Test_New(t *testing.T) { + Convey("Initialize a new instance", t, func() { + So(New(), ShouldNotBeNil) + }) + + Convey("Just test that Run doesn't bomb", t, func() { + go New().Run() + time.Sleep(1 * time.Second) + os.Setenv("PORT", "4001") + go New().Run("0.0.0.0") + go New().Run(4002) + go New().Run("0.0.0.0", 4003) + }) +} + +func Test_Macaron_Before(t *testing.T) { + Convey("Register before handlers", t, func() { + m := New() + m.Before(func(rw http.ResponseWriter, req *http.Request) bool { + return false + }) + m.Before(func(rw http.ResponseWriter, req *http.Request) bool { + return true + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) +} + +func Test_Macaron_ServeHTTP(t *testing.T) { + Convey("Serve HTTP requests", t, func() { + result := "" + m := New() + m.Use(func(c *Context) { + result += "foo" + c.Next() + result += "ban" + }) + m.Use(func(c *Context) { + result += "bar" + c.Next() + result += "baz" + }) + m.Get("/", func() {}) + m.Action(func(res http.ResponseWriter, req *http.Request) { + result += "bat" + res.WriteHeader(http.StatusBadRequest) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(result, ShouldEqual, "foobarbatbazban") + So(resp.Code, ShouldEqual, http.StatusBadRequest) + }) +} + +func Test_Macaron_Handlers(t *testing.T) { + Convey("Add custom handlers", t, func() { + result := "" + batman := func(c *Context) { + result += "batman!" 
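+ // Editor's note, not part of the vendored file: unlike m.Use, which appends to
+ // the middleware stack, the m.Handlers call below replaces the stack wholesale.
+ // That is why the "foo"/"ban" handler registered via m.Use never runs and the
+ // assertion expects exactly "batman!batman!batman!bat".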
+ } + + m := New() + m.Use(func(c *Context) { + result += "foo" + c.Next() + result += "ban" + }) + m.Handlers( + batman, + batman, + batman, + ) + + Convey("Add not callable function", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.Use("shit") + }) + + m.Get("/", func() {}) + m.Action(func(res http.ResponseWriter, req *http.Request) { + result += "bat" + res.WriteHeader(http.StatusBadRequest) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(result, ShouldEqual, "batman!batman!batman!bat") + So(resp.Code, ShouldEqual, http.StatusBadRequest) + }) +} + +func Test_Macaron_EarlyWrite(t *testing.T) { + Convey("Write early content to response", t, func() { + result := "" + m := New() + m.Use(func(res http.ResponseWriter) { + result += "foobar" + res.Write([]byte("Hello world")) + }) + m.Use(func() { + result += "bat" + }) + m.Get("/", func() {}) + m.Action(func(res http.ResponseWriter) { + result += "baz" + res.WriteHeader(http.StatusBadRequest) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(result, ShouldEqual, "foobar") + So(resp.Code, ShouldEqual, http.StatusOK) + }) +} + +func Test_Macaron_Written(t *testing.T) { + Convey("Written sign", t, func() { + resp := httptest.NewRecorder() + m := New() + m.Handlers(func(res http.ResponseWriter) { + res.WriteHeader(http.StatusOK) + }) + + ctx := m.createContext(resp, &http.Request{Method: "GET"}) + So(ctx.Written(), ShouldBeFalse) + + ctx.run() + So(ctx.Written(), ShouldBeTrue) + }) +} + +func Test_Macaron_Basic_NoRace(t *testing.T) { + Convey("Make sure no race between requests", t, func() { + m := New() + handlers := []Handler{func() {}, func() {}} + // Ensure append will not realloc to trigger the race condition + m.handlers = handlers[:1] + m.Get("/", func() {}) + for i := 0; i < 2; i++ { + go func() { + req, _ := http.NewRequest("GET", "/", nil) + resp := httptest.NewRecorder() + m.ServeHTTP(resp, req) + }() + } + }) +} + +func Test_SetENV(t *testing.T) { + Convey("Get and save environment variable", t, func() { + tests := []struct { + in string + out string + }{ + {"", "development"}, + {"not_development", "not_development"}, + } + + for _, test := range tests { + setENV(test.in) + So(Env, ShouldEqual, test.out) + } + }) +} + +func Test_Config(t *testing.T) { + Convey("Set and get configuration object", t, func() { + So(Config(), ShouldNotBeNil) + cfg, err := SetConfig([]byte("")) + So(err, ShouldBeNil) + So(cfg, ShouldNotBeNil) + }) +} diff --git a/vendor/gopkg.in/macaron.v1/recovery_test.go b/vendor/gopkg.in/macaron.v1/recovery_test.go new file mode 100644 index 0000000000..111d749ac3 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/recovery_test.go @@ -0,0 +1,74 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
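+ // Editor's note, not part of the vendored file: the tests below exercise
+ // macaron.Recovery(), which turns a panic in any later handler into an HTTP 500
+ // response instead of crashing the process. A minimal sketch of the wiring
+ // under test:
+ //
+ //	m := macaron.New()
+ //	m.Use(macaron.Recovery()) // register before any handler that may panic
+ //	m.Get("/", func() { panic("boom") }) // client receives 500 Internal Server Error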
+ +package macaron + +import ( + "bytes" + "log" + "net/http" + "net/http/httptest" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Recovery(t *testing.T) { + Convey("Recovery from panic", t, func() { + buf := bytes.NewBufferString("") + setENV(DEV) + + m := New() + m.Map(log.New(buf, "[Macaron] ", 0)) + m.Use(func(res http.ResponseWriter, req *http.Request) { + res.Header().Set("Content-Type", "unpredictable") + }) + m.Use(Recovery()) + m.Use(func(res http.ResponseWriter, req *http.Request) { + panic("here is a panic!") + }) + m.Get("/", func() {}) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusInternalServerError) + So(resp.HeaderMap.Get("Content-Type"), ShouldEqual, "text/html") + So(buf.String(), ShouldNotBeEmpty) + }) + + Convey("Revocery panic to another response writer", t, func() { + resp := httptest.NewRecorder() + resp2 := httptest.NewRecorder() + setENV(DEV) + + m := New() + m.Use(Recovery()) + m.Use(func(c *Context) { + c.MapTo(resp2, (*http.ResponseWriter)(nil)) + panic("here is a panic!") + }) + m.Get("/", func() {}) + + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp2.Code, ShouldEqual, http.StatusInternalServerError) + So(resp2.HeaderMap.Get("Content-Type"), ShouldEqual, "text/html") + So(resp2.Body.Len(), ShouldBeGreaterThan, 0) + }) +} diff --git a/vendor/gopkg.in/macaron.v1/render_test.go b/vendor/gopkg.in/macaron.v1/render_test.go new file mode 100644 index 0000000000..1a1b713102 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/render_test.go @@ -0,0 +1,738 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "encoding/xml" + "html/template" + "net/http" + "net/http/httptest" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +type Greeting struct { + One string `json:"one"` + Two string `json:"two"` +} + +type GreetingXML struct { + XMLName xml.Name `xml:"greeting"` + One string `xml:"one,attr"` + Two string `xml:"two,attr"` +} + +func Test_Render_JSON(t *testing.T) { + Convey("Render JSON", t, func() { + m := Classic() + m.Use(Renderer()) + m.Get("/foobar", func(r Render) { + r.JSON(300, Greeting{"hello", "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_JSON+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, `{"one":"hello","two":"world"}`) + }) + + Convey("Render JSON with prefix", t, func() { + m := Classic() + prefix := ")]}',\n" + m.Use(Renderer(RenderOptions{ + PrefixJSON: []byte(prefix), + })) + m.Get("/foobar", func(r Render) { + r.JSON(300, Greeting{"hello", "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_JSON+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, prefix+`{"one":"hello","two":"world"}`) + }) + + Convey("Render Indented JSON", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + IndentJSON: true, + })) + m.Get("/foobar", func(r Render) { + r.JSON(300, Greeting{"hello", "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_JSON+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, `{ + "one": "hello", + "two": "world" +}`) + }) + + Convey("Render JSON and return string", t, func() { + m := Classic() + m.Use(Renderer()) + m.Get("/foobar", func(r Render) { + result, err := r.JSONString(Greeting{"hello", "world"}) + So(err, ShouldBeNil) + So(result, ShouldEqual, `{"one":"hello","two":"world"}`) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) + + Convey("Render with charset JSON", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Charset: "foobar", + })) + m.Get("/foobar", func(r Render) { + r.JSON(300, Greeting{"hello", "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_JSON+"; charset=foobar") + So(resp.Body.String(), ShouldEqual, `{"one":"hello","two":"world"}`) + }) +} + +func Test_Render_XML(t *testing.T) { + Convey("Render XML", t, func() { + m := Classic() + m.Use(Renderer()) + m.Get("/foobar", func(r Render) { + r.XML(300, GreetingXML{One: "hello", Two: "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_XML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, ``) + }) + + Convey("Render XML with prefix", t, func() { + m 
:= Classic() + prefix := ")]}',\n" + m.Use(Renderer(RenderOptions{ + PrefixXML: []byte(prefix), + })) + m.Get("/foobar", func(r Render) { + r.XML(300, GreetingXML{One: "hello", Two: "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_XML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, prefix+`<greeting one="hello" two="world"></greeting>`) + }) + + Convey("Render Indented XML", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + IndentXML: true, + })) + m.Get("/foobar", func(r Render) { + r.XML(300, GreetingXML{One: "hello", Two: "world"}) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusMultipleChoices) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_XML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, `<greeting one="hello" two="world"></greeting>`) + }) +} + +func Test_Render_HTML(t *testing.T) { + Convey("Render HTML", t, func() { + m := Classic() + m.Use(Renderers(RenderOptions{ + Directory: "fixtures/basic", + }, "fixtures/basic2")) + m.Get("/foobar", func(r Render) { + r.SetResponseWriter(r.(*TplRender).ResponseWriter) + r.HTML(200, "hello", "jeremy") + r.SetTemplatePath("", "fixtures/basic2") + }) + m.Get("/foobar2", func(r Render) { + if r.HasTemplateSet("basic2") { + r.HTMLSet(200, "basic2", "hello", "jeremy") + } + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "

<h1>Hello jeremy</h1>

") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/foobar2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "

<h1>What's up, jeremy</h1>

") + + Convey("Change render templates path", func() { + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "

<h1>What's up, jeremy</h1>

") + }) + }) + + Convey("Render HTML and return string", t, func() { + m := Classic() + m.Use(Renderers(RenderOptions{ + Directory: "fixtures/basic", + }, "basic2:fixtures/basic2")) + m.Get("/foobar", func(r Render) { + result, err := r.HTMLString("hello", "jeremy") + So(err, ShouldBeNil) + So(result, ShouldEqual, "

<h1>Hello jeremy</h1>

") + }) + m.Get("/foobar2", func(r Render) { + result, err := r.HTMLSetString("basic2", "hello", "jeremy") + So(err, ShouldBeNil) + So(result, ShouldEqual, "

<h1>What's up, jeremy</h1>

") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/foobar2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) + + Convey("Render with nested HTML", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "admin/index", "jeremy") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "

<h1>Admin jeremy</h1>

") + }) + + Convey("Render bad HTML", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "nope", nil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusInternalServerError) + So(resp.Body.String(), ShouldEqual, "html/template: \"nope\" is undefined\n") + }) + + Convey("Invalid template set", t, func() { + Convey("Empty template set argument", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m := Classic() + m.Use(Renderers(RenderOptions{ + Directory: "fixtures/basic", + }, "")) + }) + + Convey("Bad template set path", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m := Classic() + m.Use(Renderers(RenderOptions{ + Directory: "fixtures/basic", + }, "404")) + }) + }) +} + +func Test_Render_XHTML(t *testing.T) { + Convey("Render XHTML", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + HTMLContentType: _CONTENT_XHTML, + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "hello", "jeremy") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_XHTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "

<h1>Hello jeremy</h1>

") + }) +} + +func Test_Render_Extensions(t *testing.T) { + Convey("Render with extensions", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + Extensions: []string{".tmpl", ".html"}, + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "hypertext", nil) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "Hypertext!") + }) +} + +func Test_Render_Funcs(t *testing.T) { + Convey("Render with functions", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/custom_funcs", + Funcs: []template.FuncMap{ + { + "myCustomFunc": func() string { + return "My custom function" + }, + }, + }, + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "index", "jeremy") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "My custom function") + }) +} + +func Test_Render_Layout(t *testing.T) { + Convey("Render with layout", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + Layout: "layout", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "content", "jeremy") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "head

<h1>jeremy</h1>

foot") + }) + + Convey("Render with current layout", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + Layout: "current_layout", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "content", "jeremy") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "content head

<h1>jeremy</h1>

content foot") + }) + + Convey("Render with override layout", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + Layout: "layout", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "content", "jeremy", HTMLOptions{ + Layout: "another_layout", + }) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "another head

<h1>jeremy</h1>

another foot") + }) +} + +func Test_Render_Delimiters(t *testing.T) { + Convey("Render with delimiters", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Delims: Delims{"{[{", "}]}"}, + Directory: "fixtures/basic", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "delims", "jeremy") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_HTML+"; charset=UTF-8") + So(resp.Body.String(), ShouldEqual, "

<h1>Hello jeremy</h1>

") + }) +} + +func Test_Render_BinaryData(t *testing.T) { + Convey("Render binary data", t, func() { + m := Classic() + m.Use(Renderer()) + m.Get("/foobar", func(r Render) { + r.RawData(200, []byte("hello there")) + }) + m.Get("/foobar2", func(r Render) { + r.PlainText(200, []byte("hello there")) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_BINARY) + So(resp.Body.String(), ShouldEqual, "hello there") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/foobar2", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, _CONTENT_PLAIN) + So(resp.Body.String(), ShouldEqual, "hello there") + }) + + Convey("Render binary data with mime type", t, func() { + m := Classic() + m.Use(Renderer()) + m.Get("/foobar", func(r Render) { + r.(*TplRender).ResponseWriter.Header().Set(_CONTENT_TYPE, "image/jpeg") + r.RawData(200, []byte("..jpeg data..")) + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get(_CONTENT_TYPE), ShouldEqual, "image/jpeg") + So(resp.Body.String(), ShouldEqual, "..jpeg data..") + }) +} + +func Test_Render_Status(t *testing.T) { + Convey("Render with status 204", t, func() { + resp := httptest.NewRecorder() + r := TplRender{resp, NewTemplateSet(), &RenderOptions{}, "", time.Now()} + r.Status(204) + So(resp.Code, ShouldEqual, http.StatusNoContent) + }) + + Convey("Render with status 404", t, func() { + resp := httptest.NewRecorder() + r := TplRender{resp, NewTemplateSet(), &RenderOptions{}, "", time.Now()} + r.Error(404) + So(resp.Code, ShouldEqual, http.StatusNotFound) + }) + + Convey("Render with status 500", t, func() { + resp := httptest.NewRecorder() + r := TplRender{resp, NewTemplateSet(), &RenderOptions{}, "", time.Now()} + r.Error(500) + So(resp.Code, ShouldEqual, http.StatusInternalServerError) + }) +} + +func Test_Render_NoRace(t *testing.T) { + Convey("Make sure render has no race", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "hello", "world") + }) + + done := make(chan bool) + doreq := func() { + resp := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/foobar", nil) + m.ServeHTTP(resp, req) + done <- true + } + // Run two requests to check there is no race condition + go doreq() + go doreq() + <-done + <-done + }) +} + +func Test_Render_Symlink(t *testing.T) { + Convey("Render can follow symlinks", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/symlink", + })) + m.Get("/foobar", func(r Render) { + r.HTML(200, "hello", "world") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/foobar", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + }) +} + +func Test_Render_AppendDirectories(t *testing.T) { + Convey("Render with additional templates", t, func() { + m := Classic() + m.Use(Renderer(RenderOptions{ + Directory: "fixtures/basic", + AppendDirectories: []string{"fixtures/basic/custom"}, + })) + + Convey("Request normal template", func() { + m.Get("/normal", func(r Render) { + r.HTML(200, 
"content", "Macaron") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/normal", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "

<h1>Macaron</h1>

") + So(resp.Code, ShouldEqual, http.StatusOK) + }) + + Convey("Request overwritten template", func() { + m.Get("/custom", func(r Render) { + r.HTML(200, "hello", "world") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/custom", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "

<h1>This is custom version of: Hello world</h1>

") + So(resp.Code, ShouldEqual, http.StatusOK) + }) + + }) +} + +func Test_GetExt(t *testing.T) { + Convey("Get extension", t, func() { + So(GetExt("test"), ShouldBeBlank) + So(GetExt("test.tmpl"), ShouldEqual, ".tmpl") + So(GetExt("test.go.tmpl"), ShouldEqual, ".go.tmpl") + }) +} + +func Test_dummyRender(t *testing.T) { + shouldPanic := func() { So(recover(), ShouldNotBeNil) } + + Convey("Use dummy render to gracefully handle panic", t, func() { + m := New() + + performRequest := func(method, path string) { + resp := httptest.NewRecorder() + req, err := http.NewRequest(method, path, nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + } + + m.Get("/set_response_writer", func(ctx *Context) { + defer shouldPanic() + ctx.SetResponseWriter(nil) + }) + m.Get("/json", func(ctx *Context) { + defer shouldPanic() + ctx.JSON(0, nil) + }) + m.Get("/jsonstring", func(ctx *Context) { + defer shouldPanic() + ctx.JSONString(nil) + }) + m.Get("/rawdata", func(ctx *Context) { + defer shouldPanic() + ctx.RawData(0, nil) + }) + m.Get("/plaintext", func(ctx *Context) { + defer shouldPanic() + ctx.PlainText(0, nil) + }) + m.Get("/html", func(ctx *Context) { + defer shouldPanic() + ctx.Render.HTML(0, "", nil) + }) + m.Get("/htmlset", func(ctx *Context) { + defer shouldPanic() + ctx.Render.HTMLSet(0, "", "", nil) + }) + m.Get("/htmlsetstring", func(ctx *Context) { + defer shouldPanic() + ctx.Render.HTMLSetString("", "", nil) + }) + m.Get("/htmlstring", func(ctx *Context) { + defer shouldPanic() + ctx.Render.HTMLString("", nil) + }) + m.Get("/htmlsetbytes", func(ctx *Context) { + defer shouldPanic() + ctx.Render.HTMLSetBytes("", "", nil) + }) + m.Get("/htmlbytes", func(ctx *Context) { + defer shouldPanic() + ctx.Render.HTMLBytes("", nil) + }) + m.Get("/xml", func(ctx *Context) { + defer shouldPanic() + ctx.XML(0, nil) + }) + m.Get("/error", func(ctx *Context) { + defer shouldPanic() + ctx.Error(0) + }) + m.Get("/status", func(ctx *Context) { + defer shouldPanic() + ctx.Status(0) + }) + m.Get("/settemplatepath", func(ctx *Context) { + defer shouldPanic() + ctx.SetTemplatePath("", "") + }) + m.Get("/hastemplateset", func(ctx *Context) { + defer shouldPanic() + ctx.HasTemplateSet("") + }) + + performRequest("GET", "/set_response_writer") + performRequest("GET", "/json") + performRequest("GET", "/jsonstring") + performRequest("GET", "/rawdata") + performRequest("GET", "/jsonstring") + performRequest("GET", "/plaintext") + performRequest("GET", "/html") + performRequest("GET", "/htmlset") + performRequest("GET", "/htmlsetstring") + performRequest("GET", "/htmlstring") + performRequest("GET", "/htmlsetbytes") + performRequest("GET", "/htmlbytes") + performRequest("GET", "/xml") + performRequest("GET", "/error") + performRequest("GET", "/status") + performRequest("GET", "/settemplatepath") + performRequest("GET", "/hastemplateset") + }) +} diff --git a/vendor/gopkg.in/macaron.v1/response_writer_test.go b/vendor/gopkg.in/macaron.v1/response_writer_test.go new file mode 100644 index 0000000000..5b0baeb801 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/response_writer_test.go @@ -0,0 +1,188 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. 
You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "bufio" + "io" + "net" + "net/http" + "net/http/httptest" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +type closeNotifyingRecorder struct { + *httptest.ResponseRecorder + closed chan bool +} + +func newCloseNotifyingRecorder() *closeNotifyingRecorder { + return &closeNotifyingRecorder{ + httptest.NewRecorder(), + make(chan bool, 1), + } +} + +func (c *closeNotifyingRecorder) close() { + c.closed <- true +} + +func (c *closeNotifyingRecorder) CloseNotify() <-chan bool { + return c.closed +} + +type hijackableResponse struct { + Hijacked bool +} + +func newHijackableResponse() *hijackableResponse { + return &hijackableResponse{} +} + +func (h *hijackableResponse) Header() http.Header { return nil } +func (h *hijackableResponse) Write(buf []byte) (int, error) { return 0, nil } +func (h *hijackableResponse) WriteHeader(code int) {} +func (h *hijackableResponse) Flush() {} +func (h *hijackableResponse) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h.Hijacked = true + return nil, nil, nil +} + +func Test_ResponseWriter(t *testing.T) { + Convey("Write string to response writer", t, func() { + resp := httptest.NewRecorder() + rw := NewResponseWriter(resp) + rw.Write([]byte("Hello world")) + + So(resp.Code, ShouldEqual, rw.Status()) + So(resp.Body.String(), ShouldEqual, "Hello world") + So(rw.Status(), ShouldEqual, http.StatusOK) + So(rw.Size(), ShouldEqual, 11) + So(rw.Written(), ShouldBeTrue) + }) + + Convey("Write strings to response writer", t, func() { + resp := httptest.NewRecorder() + rw := NewResponseWriter(resp) + rw.Write([]byte("Hello world")) + rw.Write([]byte("foo bar bat baz")) + + So(resp.Code, ShouldEqual, rw.Status()) + So(resp.Body.String(), ShouldEqual, "Hello worldfoo bar bat baz") + So(rw.Status(), ShouldEqual, http.StatusOK) + So(rw.Size(), ShouldEqual, 26) + So(rw.Written(), ShouldBeTrue) + }) + + Convey("Write header to response writer", t, func() { + resp := httptest.NewRecorder() + rw := NewResponseWriter(resp) + rw.WriteHeader(http.StatusNotFound) + + So(resp.Code, ShouldEqual, rw.Status()) + So(resp.Body.String(), ShouldBeBlank) + So(rw.Status(), ShouldEqual, http.StatusNotFound) + So(rw.Size(), ShouldEqual, 0) + }) + + Convey("Write before response write", t, func() { + result := "" + resp := httptest.NewRecorder() + rw := NewResponseWriter(resp) + rw.Before(func(ResponseWriter) { + result += "foo" + }) + rw.Before(func(ResponseWriter) { + result += "bar" + }) + rw.WriteHeader(http.StatusNotFound) + + So(resp.Code, ShouldEqual, rw.Status()) + So(resp.Body.String(), ShouldBeBlank) + So(rw.Status(), ShouldEqual, http.StatusNotFound) + So(rw.Size(), ShouldEqual, 0) + So(result, ShouldEqual, "barfoo") + }) + + Convey("Response writer with Hijack", t, func() { + hijackable := newHijackableResponse() + rw := NewResponseWriter(hijackable) + hijacker, ok := rw.(http.Hijacker) + So(ok, ShouldBeTrue) + _, _, err := hijacker.Hijack() + So(err, ShouldBeNil) + So(hijackable.Hijacked, ShouldBeTrue) + }) + + Convey("Response writer with bad Hijack", t, func() { + hijackable := new(http.ResponseWriter) 
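+ // Editor's note, not part of the vendored file: *hijackable dereferences a nil
+ // http.ResponseWriter, which does not implement http.Hijacker, so the Hijack
+ // call below is expected to return an error rather than succeed.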
+ rw := NewResponseWriter(*hijackable) + hijacker, ok := rw.(http.Hijacker) + So(ok, ShouldBeTrue) + _, _, err := hijacker.Hijack() + So(err, ShouldNotBeNil) + }) + + Convey("Response writer with close notify", t, func() { + resp := newCloseNotifyingRecorder() + rw := NewResponseWriter(resp) + closed := false + notifier := rw.(http.CloseNotifier).CloseNotify() + resp.close() + select { + case <-notifier: + closed = true + case <-time.After(time.Second): + } + So(closed, ShouldBeTrue) + }) + + Convey("Response writer with flusher", t, func() { + resp := httptest.NewRecorder() + rw := NewResponseWriter(resp) + _, ok := rw.(http.Flusher) + So(ok, ShouldBeTrue) + }) + + Convey("Response writer with flusher handler", t, func() { + m := Classic() + m.Get("/events", func(w http.ResponseWriter, r *http.Request) { + f, ok := w.(http.Flusher) + So(ok, ShouldBeTrue) + + w.Header().Set("Content-Type", "text/event-stream") + w.Header().Set("Cache-Control", "no-cache") + w.Header().Set("Connection", "keep-alive") + + for i := 0; i < 2; i++ { + time.Sleep(10 * time.Millisecond) + io.WriteString(w, "data: Hello\n\n") + f.Flush() + } + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/events", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Body.String(), ShouldEqual, "data: Hello\n\ndata: Hello\n\n") + }) +} diff --git a/vendor/gopkg.in/macaron.v1/return_handler_test.go b/vendor/gopkg.in/macaron.v1/return_handler_test.go new file mode 100644 index 0000000000..81569cf851 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/return_handler_test.go @@ -0,0 +1,102 @@ +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Return_Handler(t *testing.T) { + Convey("Return with status and body", t, func() { + m := New() + m.Get("/", func() (int, string) { + return 418, "i'm a teapot" + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusTeapot) + So(resp.Body.String(), ShouldEqual, "i'm a teapot") + }) + + Convey("Return with error", t, func() { + m := New() + m.Get("/", func() error { + return errors.New("what the hell!!!") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusInternalServerError) + So(resp.Body.String(), ShouldEqual, "what the hell!!!\n") + + Convey("Return with nil error", func() { + m := New() + m.Get("/", func() error { + return nil + }, func() (int, string) { + return 200, "Awesome" + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Body.String(), ShouldEqual, "Awesome") + }) + }) + + Convey("Return with pointer", t, func() { + m := New() + m.Get("/", func() *string { + str := "hello world" + return &str + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "hello world") + }) + + Convey("Return with byte slice", t, func() { + m := New() + m.Get("/", func() []byte { + return []byte("hello world") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Body.String(), ShouldEqual, "hello world") + }) +} diff --git a/vendor/gopkg.in/macaron.v1/router_test.go b/vendor/gopkg.in/macaron.v1/router_test.go new file mode 100644 index 0000000000..a8abd63348 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/router_test.go @@ -0,0 +1,309 @@ +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "errors" + "net/http" + "net/http/httptest" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Router_Handle(t *testing.T) { + Convey("Register all HTTP methods routes", t, func() { + m := New() + m.Get("/get", func() string { + return "GET" + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/get", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "GET") + + m.Patch("/patch", func() string { + return "PATCH" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("PATCH", "/patch", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "PATCH") + + m.Post("/post", func() string { + return "POST" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("POST", "/post", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "POST") + + m.Put("/put", func() string { + return "PUT" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("PUT", "/put", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "PUT") + + m.Delete("/delete", func() string { + return "DELETE" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("DELETE", "/delete", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "DELETE") + + m.Options("/options", func() string { + return "OPTIONS" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("OPTIONS", "/options", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "OPTIONS") + + m.Head("/head", func() string { + return "HEAD" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("HEAD", "/head", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "HEAD") + + m.Any("/any", func() string { + return "ANY" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "/any", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "ANY") + + m.Route("/route", "GET,POST", func() string { + return "ROUTE" + }) + resp = httptest.NewRecorder() + req, err = http.NewRequest("POST", "/route", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "ROUTE") + }) + + Convey("Register with or without auto head", t, func() { + Convey("Without auto head", func() { + m := New() + m.Get("/", func() string { + return "GET" + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("HEAD", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, 404) + }) + + Convey("With auto head", func() { + m := New() + m.SetAutoHead(true) + m.Get("/", func() string { + return "GET" + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("HEAD", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, 200) + }) + }) + + Convey("Register all HTTP methods routes with combo", t, func() { + m := New() + m.SetURLPrefix("/prefix") + m.Use(Renderer()) + m.Combo("/", func(ctx *Context) { + ctx.Data["prefix"] = "Prefix_" + }). + Get(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "GET" }). + Patch(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "PATCH" }). + Post(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "POST" }). + Put(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "PUT" }). + Delete(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "DELETE" }). 
+ Options(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "OPTIONS" }). + Head(func(ctx *Context) string { return ctx.Data["prefix"].(string) + "HEAD" }) + + for name := range _HTTP_METHODS { + resp := httptest.NewRecorder() + req, err := http.NewRequest(name, "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Prefix_"+name) + } + + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.Combo("/").Get(func() {}).Get(nil) + }) + + Convey("Register duplicated routes", t, func() { + r := NewRouter() + r.Get("/") + r.Get("/") + }) + + Convey("Register invalid HTTP method", t, func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + r := NewRouter() + r.Handle("404", "/", nil) + }) +} + +func Test_Route_Name(t *testing.T) { + Convey("Set route name", t, func() { + m := New() + m.Get("/", func() {}).Name("home") + + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.Get("/", func() {}).Name("home") + }) + + Convey("Set combo router name", t, func() { + m := New() + m.Combo("/").Get(func() {}).Name("home") + + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.Combo("/").Name("home") + }) +} + +func Test_Router_URLFor(t *testing.T) { + Convey("Build URL path", t, func() { + m := New() + m.Get("/user/:id", func() {}).Name("user_id") + m.Get("/user/:id/:name", func() {}).Name("user_id_name") + m.Get("cms_:id_:page.html", func() {}).Name("id_page") + + So(m.URLFor("user_id", "id", "12"), ShouldEqual, "/user/12") + So(m.URLFor("user_id_name", "id", "12", "name", "unknwon"), ShouldEqual, "/user/12/unknwon") + So(m.URLFor("id_page", "id", "12", "page", "profile"), ShouldEqual, "/cms_12_profile.html") + + Convey("Number of pair values does not match", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.URLFor("user_id", "id") + }) + + Convey("Empty pair value", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.URLFor("user_id", "", "") + }) + + Convey("Empty route name", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.Get("/user/:id", func() {}).Name("") + }) + + Convey("Invalid route name", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + m.URLFor("404") + }) + }) +} + +func Test_Router_Group(t *testing.T) { + Convey("Register route group", t, func() { + m := New() + m.Group("/api", func() { + m.Group("/v1", func() { + m.Get("/list", func() string { + return "Well done!" 
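+ // Editor's note, not part of the vendored file: nested m.Group calls
+ // concatenate their prefixes, so this handler is mounted at /api/v1/list,
+ // the exact path requested in the assertion below.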
+ }) + }) + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/api/v1/list", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Well done!") + }) +} + +func Test_Router_NotFound(t *testing.T) { + Convey("Custom not found handler", t, func() { + m := New() + m.Get("/", func() {}) + m.NotFound(func() string { + return "Custom not found" + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/404", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "Custom not found") + }) +} + +func Test_Router_InternalServerError(t *testing.T) { + Convey("Custom internal server error handler", t, func() { + m := New() + m.Get("/", func() error { + return errors.New("Custom internal server error") + }) + m.InternalServerError(func(rw http.ResponseWriter, err error) { + rw.WriteHeader(500) + rw.Write([]byte(err.Error())) + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, 500) + So(resp.Body.String(), ShouldEqual, "Custom internal server error") + }) +} + +func Test_Router_splat(t *testing.T) { + Convey("Register router with glob", t, func() { + m := New() + m.Get("/*", func(ctx *Context) string { + return ctx.Params("*") + }) + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/hahaha", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Body.String(), ShouldEqual, "hahaha") + }) +} diff --git a/vendor/gopkg.in/macaron.v1/static_test.go b/vendor/gopkg.in/macaron.v1/static_test.go new file mode 100644 index 0000000000..cfa723a277 --- /dev/null +++ b/vendor/gopkg.in/macaron.v1/static_test.go @@ -0,0 +1,246 @@ +// Copyright 2013 Martini Authors +// Copyright 2014 The Macaron Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package macaron + +import ( + "bytes" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path" + "strings" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +var currentRoot, _ = os.Getwd() + +func Test_Static(t *testing.T) { + Convey("Serve static files", t, func() { + m := New() + m.Use(Static("./")) + + resp := httptest.NewRecorder() + resp.Body = new(bytes.Buffer) + req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get("Expires"), ShouldBeBlank) + So(resp.Body.Len(), ShouldBeGreaterThan, 0) + + Convey("Change static path", func() { + m.Get("/", func(ctx *Context) { + ctx.ChangeStaticPath("./", "fixtures/basic2") + }) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + resp = httptest.NewRecorder() + resp.Body = new(bytes.Buffer) + req, err = http.NewRequest("GET", "http://localhost:4000/hello.tmpl", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get("Expires"), ShouldBeBlank) + So(resp.Body.Len(), ShouldBeGreaterThan, 0) + }) + }) + + Convey("Serve static files with local path", t, func() { + Root = os.TempDir() + f, err := ioutil.TempFile(Root, "static_content") + So(err, ShouldBeNil) + f.WriteString("Expected Content") + f.Close() + + m := New() + m.Use(Static(".")) + + resp := httptest.NewRecorder() + resp.Body = new(bytes.Buffer) + req, err := http.NewRequest("GET", "http://localhost:4000/"+path.Base(strings.Replace(f.Name(), "\\", "/", -1)), nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Header().Get("Expires"), ShouldBeBlank) + So(resp.Body.String(), ShouldEqual, "Expected Content") + }) + + Convey("Serve static files with head", t, func() { + m := New() + m.Use(Static(currentRoot)) + + resp := httptest.NewRecorder() + resp.Body = new(bytes.Buffer) + req, err := http.NewRequest("HEAD", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusOK) + So(resp.Body.Len(), ShouldEqual, 0) + }) + + Convey("Serve static files as post", t, func() { + m := New() + m.Use(Static(currentRoot)) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("POST", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldEqual, http.StatusNotFound) + }) + + Convey("Serve static files with bad directory", t, func() { + m := Classic() + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + So(resp.Code, ShouldNotEqual, http.StatusOK) + }) +} + +func Test_Static_Options(t *testing.T) { + Convey("Serve static files with options logging", t, func() { + var buf bytes.Buffer + m := NewWithLogger(&buf) + opt := StaticOptions{} + m.Use(Static(currentRoot, opt)) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") + + // Not disable logging. 
+ m.Handlers() + buf.Reset() + opt.SkipLogging = true + m.Use(Static(currentRoot, opt)) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(buf.Len(), ShouldEqual, 0) + }) + + Convey("Serve static files with options serve index", t, func() { + var buf bytes.Buffer + m := NewWithLogger(&buf) + opt := StaticOptions{IndexFile: "macaron.go"} + m.Use(Static(currentRoot, opt)) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") + }) + + Convey("Serve static files with options prefix", t, func() { + var buf bytes.Buffer + m := NewWithLogger(&buf) + opt := StaticOptions{Prefix: "public"} + m.Use(Static(currentRoot, opt)) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/public/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") + }) + + Convey("Serve static files with options expires", t, func() { + var buf bytes.Buffer + m := NewWithLogger(&buf) + opt := StaticOptions{Expires: func() string { return "46" }} + m.Use(Static(currentRoot, opt)) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Header().Get("Expires"), ShouldEqual, "46") + }) +} + +func Test_Static_Redirect(t *testing.T) { + Convey("Serve static files with redirect", t, func() { + m := New() + m.Use(Static(currentRoot, StaticOptions{Prefix: "/public"})) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/public", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusFound) + So(resp.Header().Get("Location"), ShouldEqual, "/public/") + }) +} + +func Test_Statics(t *testing.T) { + Convey("Serve multiple static routers", t, func() { + Convey("Register empty directory", func() { + defer func() { + So(recover(), ShouldNotBeNil) + }() + + m := New() + m.Use(Statics(StaticOptions{})) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + }) + + Convey("Serve normally", func() { + var buf bytes.Buffer + m := NewWithLogger(&buf) + m.Use(Statics(StaticOptions{}, currentRoot, currentRoot+"/fixtures/basic")) + + resp := httptest.NewRecorder() + req, err := http.NewRequest("GET", "http://localhost:4000/macaron.go", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(buf.String(), ShouldEqual, "[Macaron] [Static] Serving /macaron.go\n") + + resp = httptest.NewRecorder() + req, err = http.NewRequest("GET", "http://localhost:4000/admin/index.tmpl", nil) + So(err, ShouldBeNil) + m.ServeHTTP(resp, req) + + So(resp.Code, ShouldEqual, http.StatusOK) + So(buf.String(), ShouldEndWith, "[Macaron] [Static] Serving /admin/index.tmpl\n") + }) + }) +} diff --git a/vendor/gopkg.in/raintank/schema.v0/event_gen_test.go b/vendor/gopkg.in/raintank/schema.v0/event_gen_test.go new file mode 100644 index 0000000000..74bc1aec9c --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v0/event_gen_test.go @@ -0,0 +1,125 @@ +package schema + +// NOTE: THIS FILE WAS PRODUCED BY THE +// MSGP CODE GENERATION 
TOOL (github.com/tinylib/msgp) +// DO NOT EDIT + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalProbeEvent(t *testing.T) { + v := ProbeEvent{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgProbeEvent(b *testing.B) { + v := ProbeEvent{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgProbeEvent(b *testing.B) { + v := ProbeEvent{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalProbeEvent(b *testing.B) { + v := ProbeEvent{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeProbeEvent(t *testing.T) { + v := ProbeEvent{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := ProbeEvent{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeProbeEvent(b *testing.B) { + v := ProbeEvent{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeProbeEvent(b *testing.B) { + v := ProbeEvent{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v0/metric_gen_test.go b/vendor/gopkg.in/raintank/schema.v0/metric_gen_test.go new file mode 100644 index 0000000000..955b2a6853 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v0/metric_gen_test.go @@ -0,0 +1,351 @@ +package schema + +// NOTE: THIS FILE WAS PRODUCED BY THE +// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) +// DO NOT EDIT + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalMetricData(t *testing.T) { + v := MetricData{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetricData(b *testing.B) { + v := MetricData{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetricData(b *testing.B) { 
+ v := MetricData{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetricData(b *testing.B) { + v := MetricData{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetricData(t *testing.T) { + v := MetricData{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := MetricData{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetricData(b *testing.B) { + v := MetricData{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetricData(b *testing.B) { + v := MetricData{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetricDataArray(t *testing.T) { + v := MetricDataArray{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetricDataArray(b *testing.B) { + v := MetricDataArray{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetricDataArray(b *testing.B) { + v := MetricDataArray{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetricDataArray(b *testing.B) { + v := MetricDataArray{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetricDataArray(t *testing.T) { + v := MetricDataArray{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := MetricDataArray{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetricDataArray(b *testing.B) { + v := MetricDataArray{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + 
+func BenchmarkDecodeMetricDataArray(b *testing.B) { + v := MetricDataArray{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetricDefinition(t *testing.T) { + v := MetricDefinition{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetricDefinition(b *testing.B) { + v := MetricDefinition{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetricDefinition(b *testing.B) { + v := MetricDefinition{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetricDefinition(b *testing.B) { + v := MetricDefinition{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetricDefinition(t *testing.T) { + v := MetricDefinition{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := MetricDefinition{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetricDefinition(b *testing.B) { + v := MetricDefinition{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetricDefinition(b *testing.B) { + v := MetricDefinition{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v0/metric_serialization_bench_test.go b/vendor/gopkg.in/raintank/schema.v0/metric_serialization_bench_test.go new file mode 100644 index 0000000000..03b877da33 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v0/metric_serialization_bench_test.go @@ -0,0 +1,144 @@ +package schema + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "math/rand" + "strconv" + "testing" +) + +func getDifferentMetrics(amount int) []*MetricData { + names := []string{ + "litmus.http.error_state.", + "litmus.hello.dieter_plaetinck.be", + "litmus.ok.raintank_dns_error_state_foo_longer", + "hi.alerting.state", + } + intervals := []int{1, 10, 60} + tags := [][]string{ + { + "foo:bar", + "endpoint_id:25", + "collector_id:hi", + }, + { + "foo_bar:quux", + "endpoint_id:25", + "collector_id:hi", + 
"some_other_tag:ok", + }, + } + r := rand.New(rand.NewSource(438)) + out := make([]*MetricData, amount) + for i := 0; i < amount; i++ { + out[i] = &MetricData{ + OrgId: i, + Name: names[i%len(names)] + "foo.bar" + strconv.Itoa(i), + Metric: names[i%len(names)], + Interval: intervals[i%len(intervals)], + Value: r.Float64(), + Unit: "foo", + Time: r.Int63(), + TargetType: "bleh", + Tags: tags[i%len(tags)], + } + } + return out +} + +func BenchmarkSerialize3000MetricsJson(b *testing.B) { + metrics := getDifferentMetrics(3000) + b.ResetTimer() + var size int + for n := 0; n < b.N; n++ { + i, err := json.Marshal(metrics) + if err != nil { + panic(err) + } + size = len(i) + } + b.Log("final size:", size) +} + +func BenchmarkDeSerialize3000MetricsJson(b *testing.B) { + metrics := getDifferentMetrics(3000) + data, err := json.Marshal(metrics) + if err != nil { + panic(err) + } + out := make([]*MetricData, 0) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := json.Unmarshal(data, &out) + if err != nil { + panic(err) + } + } +} + +func BenchmarkSerialize3000MetricsGob(b *testing.B) { + metrics := getDifferentMetrics(3000) + var size int + b.ResetTimer() + for n := 0; n < b.N; n++ { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(metrics) + if err != nil { + panic(err) + } + size = buf.Len() + } + b.Log("final size:", size) +} +func BenchmarkDeSerialize3000MetricsGob(b *testing.B) { + metrics := getDifferentMetrics(3000) + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(metrics) + if err != nil { + } + out := make([]*MetricData, 0) + data := buf.Bytes() + b.ResetTimer() + for n := 0; n < b.N; n++ { + buf := bytes.NewBuffer(data) + dec := gob.NewDecoder(buf) + err := dec.Decode(&out) + if err != nil { + panic(err) + } + } +} + +func BenchmarkSerialize3000MetricsMsgp(b *testing.B) { + metrics := getDifferentMetrics(3000) + var size int + b.ResetTimer() + for n := 0; n < b.N; n++ { + m := MetricDataArray(metrics) + data, err := m.MarshalMsg(nil) + if err != nil { + panic(err) + } + size = len(data) + } + b.Log("final size:", size) +} +func BenchmarkDeSerialize3000MetricsMsgp(b *testing.B) { + metrics := getDifferentMetrics(3000) + m := MetricDataArray(metrics) + data, err := m.MarshalMsg(nil) + if err != nil { + } + var out MetricDataArray + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err := out.UnmarshalMsg(data) + if err != nil { + panic(err) + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v0/reslice_test.go b/vendor/gopkg.in/raintank/schema.v0/reslice_test.go new file mode 100644 index 0000000000..87ffad8bd7 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v0/reslice_test.go @@ -0,0 +1,66 @@ +package schema + +import ( + "testing" +) + +type testCase struct { + inSize int + subSize int +} + +func TestReslice(t *testing.T) { + cases := []testCase{ + {10, 1}, + {10, 2}, + {10, 3}, + {10, 4}, + {10, 5}, + {10, 6}, + {10, 7}, + {10, 8}, + {10, 9}, + {10, 10}, + {10, 11}, + {100, 1}, + {100, 13}, + {100, 39}, + {100, 74}, + {100, 143}, + {100, 5000}, + } + for _, c := range cases { + in := make([]*MetricData, c.inSize) + for i := 0; i < c.inSize; i++ { + in[i] = &MetricData{OrgId: i} + } + out := Reslice(in, c.subSize) + expectedLen := len(in) / c.subSize + fullSubSlices := len(in) / c.subSize + if len(in)%c.subSize != 0 { + expectedLen += 1 + } + if len(out) != expectedLen { + t.Fatalf("case %#v: out array len expected %d, got %d", c, expectedLen, len(out)) + } + for i := 0; i < fullSubSlices; i++ { + if len(out[i]) != c.subSize { + 
t.Fatalf("out sub array %d len expected %d, got %d", i, c.subSize, len(out[i])) + } + } + lastSize := len(in) % c.subSize + if lastSize == 0 { + lastSize = c.subSize + } + if len(out[len(out)-1]) != lastSize { + t.Fatalf("out last sub array len expected %d, got %d", lastSize, len(out[len(out)-1])) + } + for i := 0; i < len(in); i++ { + subArray := i / c.subSize + subI := i % c.subSize + if in[i] != out[subArray][subI] { + t.Fatalf("element mismatch. in: %v, out: %v", in[i], out[subArray][subI]) + } + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v1/event.go b/vendor/gopkg.in/raintank/schema.v1/event.go deleted file mode 100644 index e88faa837a..0000000000 --- a/vendor/gopkg.in/raintank/schema.v1/event.go +++ /dev/null @@ -1,36 +0,0 @@ -package schema - -import ( - "errors" - "fmt" - "strings" -) - -var errInvalidEvent = errors.New("invalid event definition") -var errFmtInvalidSeverity = "invalid severity level %q" - -//go:generate msgp - -type ProbeEvent struct { - Id string `json:"id"` - EventType string `json:"event_type"` - OrgId int64 `json:"org_id"` - Severity string `json:"severity"` // enum "INFO" "WARN" "ERROR" "OK" - Source string `json:"source"` - Timestamp int64 `json:"timestamp"` - Message string `json:"message"` - Tags map[string]string `json:"tags"` -} - -func (e *ProbeEvent) Validate() error { - if e.EventType == "" || e.OrgId == 0 || e.Source == "" || e.Timestamp == 0 || e.Message == "" { - return errInvalidEvent - } - switch strings.ToLower(e.Severity) { - case "info", "ok", "warn", "error", "warning", "critical": - // nop - default: - return fmt.Errorf(errFmtInvalidSeverity, e.Severity) - } - return nil -} diff --git a/vendor/gopkg.in/raintank/schema.v1/event_gen.go b/vendor/gopkg.in/raintank/schema.v1/event_gen.go deleted file mode 100644 index c9c23bc803..0000000000 --- a/vendor/gopkg.in/raintank/schema.v1/event_gen.go +++ /dev/null @@ -1,319 +0,0 @@ -package schema - -// NOTE: THIS FILE WAS PRODUCED BY THE -// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) -// DO NOT EDIT - -import "github.com/tinylib/msgp/msgp" - -// DecodeMsg implements msgp.Decodable -func (z *ProbeEvent) DecodeMsg(dc *msgp.Reader) (err error) { - var field []byte - _ = field - var zbai uint32 - zbai, err = dc.ReadMapHeader() - if err != nil { - return - } - for zbai > 0 { - zbai-- - field, err = dc.ReadMapKeyPtr() - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "Id": - z.Id, err = dc.ReadString() - if err != nil { - return - } - case "EventType": - z.EventType, err = dc.ReadString() - if err != nil { - return - } - case "OrgId": - z.OrgId, err = dc.ReadInt64() - if err != nil { - return - } - case "Severity": - z.Severity, err = dc.ReadString() - if err != nil { - return - } - case "Source": - z.Source, err = dc.ReadString() - if err != nil { - return - } - case "Timestamp": - z.Timestamp, err = dc.ReadInt64() - if err != nil { - return - } - case "Message": - z.Message, err = dc.ReadString() - if err != nil { - return - } - case "Tags": - var zcmr uint32 - zcmr, err = dc.ReadMapHeader() - if err != nil { - return - } - if z.Tags == nil && zcmr > 0 { - z.Tags = make(map[string]string, zcmr) - } else if len(z.Tags) > 0 { - for key, _ := range z.Tags { - delete(z.Tags, key) - } - } - for zcmr > 0 { - zcmr-- - var zxvk string - var zbzg string - zxvk, err = dc.ReadString() - if err != nil { - return - } - zbzg, err = dc.ReadString() - if err != nil { - return - } - z.Tags[zxvk] = zbzg - } - default: - err = dc.Skip() - if err != nil { - return - } - } - } - 
return -} - -// EncodeMsg implements msgp.Encodable -func (z *ProbeEvent) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 8 - // write "Id" - err = en.Append(0x88, 0xa2, 0x49, 0x64) - if err != nil { - return err - } - err = en.WriteString(z.Id) - if err != nil { - return - } - // write "EventType" - err = en.Append(0xa9, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65) - if err != nil { - return err - } - err = en.WriteString(z.EventType) - if err != nil { - return - } - // write "OrgId" - err = en.Append(0xa5, 0x4f, 0x72, 0x67, 0x49, 0x64) - if err != nil { - return err - } - err = en.WriteInt64(z.OrgId) - if err != nil { - return - } - // write "Severity" - err = en.Append(0xa8, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79) - if err != nil { - return err - } - err = en.WriteString(z.Severity) - if err != nil { - return - } - // write "Source" - err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) - if err != nil { - return err - } - err = en.WriteString(z.Source) - if err != nil { - return - } - // write "Timestamp" - err = en.Append(0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70) - if err != nil { - return err - } - err = en.WriteInt64(z.Timestamp) - if err != nil { - return - } - // write "Message" - err = en.Append(0xa7, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65) - if err != nil { - return err - } - err = en.WriteString(z.Message) - if err != nil { - return - } - // write "Tags" - err = en.Append(0xa4, 0x54, 0x61, 0x67, 0x73) - if err != nil { - return err - } - err = en.WriteMapHeader(uint32(len(z.Tags))) - if err != nil { - return - } - for zxvk, zbzg := range z.Tags { - err = en.WriteString(zxvk) - if err != nil { - return - } - err = en.WriteString(zbzg) - if err != nil { - return - } - } - return -} - -// MarshalMsg implements msgp.Marshaler -func (z *ProbeEvent) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 8 - // string "Id" - o = append(o, 0x88, 0xa2, 0x49, 0x64) - o = msgp.AppendString(o, z.Id) - // string "EventType" - o = append(o, 0xa9, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65) - o = msgp.AppendString(o, z.EventType) - // string "OrgId" - o = append(o, 0xa5, 0x4f, 0x72, 0x67, 0x49, 0x64) - o = msgp.AppendInt64(o, z.OrgId) - // string "Severity" - o = append(o, 0xa8, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79) - o = msgp.AppendString(o, z.Severity) - // string "Source" - o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65) - o = msgp.AppendString(o, z.Source) - // string "Timestamp" - o = append(o, 0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70) - o = msgp.AppendInt64(o, z.Timestamp) - // string "Message" - o = append(o, 0xa7, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65) - o = msgp.AppendString(o, z.Message) - // string "Tags" - o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73) - o = msgp.AppendMapHeader(o, uint32(len(z.Tags))) - for zxvk, zbzg := range z.Tags { - o = msgp.AppendString(o, zxvk) - o = msgp.AppendString(o, zbzg) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *ProbeEvent) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zajw uint32 - zajw, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - for zajw > 0 { - zajw-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - return - } - switch msgp.UnsafeString(field) { - case "Id": - z.Id, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - return - } - case "EventType": - z.EventType, bts, err = 
msgp.ReadStringBytes(bts) - if err != nil { - return - } - case "OrgId": - z.OrgId, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - case "Severity": - z.Severity, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - return - } - case "Source": - z.Source, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - return - } - case "Timestamp": - z.Timestamp, bts, err = msgp.ReadInt64Bytes(bts) - if err != nil { - return - } - case "Message": - z.Message, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - return - } - case "Tags": - var zwht uint32 - zwht, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - return - } - if z.Tags == nil && zwht > 0 { - z.Tags = make(map[string]string, zwht) - } else if len(z.Tags) > 0 { - for key, _ := range z.Tags { - delete(z.Tags, key) - } - } - for zwht > 0 { - var zxvk string - var zbzg string - zwht-- - zxvk, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - return - } - zbzg, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - return - } - z.Tags[zxvk] = zbzg - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *ProbeEvent) Msgsize() (s int) { - s = 1 + 3 + msgp.StringPrefixSize + len(z.Id) + 10 + msgp.StringPrefixSize + len(z.EventType) + 6 + msgp.Int64Size + 9 + msgp.StringPrefixSize + len(z.Severity) + 7 + msgp.StringPrefixSize + len(z.Source) + 10 + msgp.Int64Size + 8 + msgp.StringPrefixSize + len(z.Message) + 5 + msgp.MapHeaderSize - if z.Tags != nil { - for zxvk, zbzg := range z.Tags { - _ = zbzg - s += msgp.StringPrefixSize + len(zxvk) + msgp.StringPrefixSize + len(zbzg) - } - } - return -} diff --git a/vendor/gopkg.in/raintank/schema.v1/metric.go b/vendor/gopkg.in/raintank/schema.v1/metric.go index a4a1bc5970..1de6dd1d1f 100644 --- a/vendor/gopkg.in/raintank/schema.v1/metric.go +++ b/vendor/gopkg.in/raintank/schema.v1/metric.go @@ -129,9 +129,8 @@ type MetricDefinition struct { LastUpdate int64 `json:"lastUpdate"` // unix timestamp Partition int32 `json:"partition"` - // this is a special attribute that does not need to be set when storing - // the struct or sending it over the network. the content of NameWithTags - // can be generated by calling the method DeduplicateNameWithTags(). 
+ // this is a special attribute that does not need to be set; it is only used + // to cache the state of NameWithTags() + nameWithTags string `json:"-"` +} diff --git a/vendor/gopkg.in/raintank/schema.v1/metric_gen_test.go b/vendor/gopkg.in/raintank/schema.v1/metric_gen_test.go new file mode 100644 index 0000000000..955b2a6853 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v1/metric_gen_test.go @@ -0,0 +1,351 @@ +package schema + +// NOTE: THIS FILE WAS PRODUCED BY THE +// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) +// DO NOT EDIT + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalMetricData(t *testing.T) { + v := MetricData{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetricData(b *testing.B) { + v := MetricData{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetricData(b *testing.B) { + v := MetricData{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetricData(b *testing.B) { + v := MetricData{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetricData(t *testing.T) { + v := MetricData{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := MetricData{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetricData(b *testing.B) { + v := MetricData{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetricData(b *testing.B) { + v := MetricData{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetricDataArray(t *testing.T) { + v := MetricDataArray{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetricDataArray(b *testing.B) { + v := MetricDataArray{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +}
+ +func BenchmarkAppendMsgMetricDataArray(b *testing.B) { + v := MetricDataArray{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetricDataArray(b *testing.B) { + v := MetricDataArray{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetricDataArray(t *testing.T) { + v := MetricDataArray{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := MetricDataArray{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetricDataArray(b *testing.B) { + v := MetricDataArray{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetricDataArray(b *testing.B) { + v := MetricDataArray{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalMetricDefinition(t *testing.T) { + v := MetricDefinition{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgMetricDefinition(b *testing.B) { + v := MetricDefinition{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgMetricDefinition(b *testing.B) { + v := MetricDefinition{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalMetricDefinition(b *testing.B) { + v := MetricDefinition{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodeMetricDefinition(t *testing.T) { + v := MetricDefinition{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := MetricDefinition{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodeMetricDefinition(b *testing.B) { + v := MetricDefinition{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := 
msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodeMetricDefinition(b *testing.B) { + v := MetricDefinition{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v1/metric_serialization_bench_test.go b/vendor/gopkg.in/raintank/schema.v1/metric_serialization_bench_test.go new file mode 100644 index 0000000000..cda0d7eb7c --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v1/metric_serialization_bench_test.go @@ -0,0 +1,144 @@ +package schema + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "math/rand" + "strconv" + "testing" +) + +func getDifferentMetrics(amount int) []*MetricData { + names := []string{ + "litmus.http.error_state.", + "litmus.hello.dieter_plaetinck.be", + "litmus.ok.raintank_dns_error_state_foo_longer", + "hi.alerting.state", + } + intervals := []int{1, 10, 60} + tags := [][]string{ + { + "foo:bar", + "endpoint_id:25", + "collector_id:hi", + }, + { + "foo_bar:quux", + "endpoint_id:25", + "collector_id:hi", + "some_other_tag:ok", + }, + } + r := rand.New(rand.NewSource(438)) + out := make([]*MetricData, amount) + for i := 0; i < amount; i++ { + out[i] = &MetricData{ + OrgId: i, + Name: names[i%len(names)] + "foo.bar" + strconv.Itoa(i), + Metric: names[i%len(names)], + Interval: intervals[i%len(intervals)], + Value: r.Float64(), + Unit: "foo", + Time: r.Int63(), + Mtype: "bleh", + Tags: tags[i%len(tags)], + } + } + return out +} + +func BenchmarkSerialize3000MetricsJson(b *testing.B) { + metrics := getDifferentMetrics(3000) + b.ResetTimer() + var size int + for n := 0; n < b.N; n++ { + i, err := json.Marshal(metrics) + if err != nil { + panic(err) + } + size = len(i) + } + b.Log("final size:", size) +} + +func BenchmarkDeSerialize3000MetricsJson(b *testing.B) { + metrics := getDifferentMetrics(3000) + data, err := json.Marshal(metrics) + if err != nil { + panic(err) + } + out := make([]*MetricData, 0) + b.ResetTimer() + for n := 0; n < b.N; n++ { + err := json.Unmarshal(data, &out) + if err != nil { + panic(err) + } + } +} + +func BenchmarkSerialize3000MetricsGob(b *testing.B) { + metrics := getDifferentMetrics(3000) + var size int + b.ResetTimer() + for n := 0; n < b.N; n++ { + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(metrics) + if err != nil { + panic(err) + } + size = buf.Len() + } + b.Log("final size:", size) +} +func BenchmarkDeSerialize3000MetricsGob(b *testing.B) { + metrics := getDifferentMetrics(3000) + var buf bytes.Buffer + enc := gob.NewEncoder(&buf) + err := enc.Encode(metrics) + if err != nil { + panic(err) + } + out := make([]*MetricData, 0) + data := buf.Bytes() + b.ResetTimer() + for n := 0; n < b.N; n++ { + buf := bytes.NewBuffer(data) + dec := gob.NewDecoder(buf) + err := dec.Decode(&out) + if err != nil { + panic(err) + } + } +} + +func BenchmarkSerialize3000MetricsMsgp(b *testing.B) { + metrics := getDifferentMetrics(3000) + var size int + b.ResetTimer() + for n := 0; n < b.N; n++ { + m := MetricDataArray(metrics) + data, err := m.MarshalMsg(nil) + if err != nil { + panic(err) + } + size = len(data) + } + b.Log("final size:", size) +} +func BenchmarkDeSerialize3000MetricsMsgp(b *testing.B) { + metrics := getDifferentMetrics(3000) + m :=
MetricDataArray(metrics) + data, err := m.MarshalMsg(nil) + if err != nil { + panic(err) + } + var out MetricDataArray + b.ResetTimer() + for n := 0; n < b.N; n++ { + _, err := out.UnmarshalMsg(data) + if err != nil { + panic(err) + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v1/metric_test.go b/vendor/gopkg.in/raintank/schema.v1/metric_test.go new file mode 100644 index 0000000000..6d5224c1c1 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v1/metric_test.go @@ -0,0 +1,135 @@ +package schema + +import ( + "reflect" + "sort" + "testing" + "unsafe" +) + +func BenchmarkSetId(b *testing.B) { + metric := MetricData{ + OrgId: 1234, + Name: "key1=val1.key2=val2.my.test.metric.name", + Metric: "my.test.metric.name", + Interval: 15, + Value: 0.1234, + Unit: "ms", + Time: 1234567890, + Mtype: "gauge", + Tags: []string{"key1:val1", "key2:val2"}, + } + for i := 0; i < b.N; i++ { + metric.SetId() + } +} + +func TestTagValidation(t *testing.T) { + type testCase struct { + tag []string + expecting bool + } + + testCases := []testCase{ + {[]string{"abc=cba"}, true}, + {[]string{"a="}, false}, + {[]string{"a!="}, false}, + {[]string{"=abc"}, false}, + {[]string{"@#$%!=(*&"}, false}, + {[]string{"!@#$%=(*&"}, false}, + {[]string{"@#;$%=(*&"}, false}, + {[]string{"@#$%=(;*&"}, false}, + {[]string{"@#$%=(*&"}, true}, + {[]string{"@#$%=(*&", "abc=!fd", "a===="}, true}, + {[]string{"@#$%=(*&", "abc=!fd", "a===;="}, false}, + } + + for _, tc := range testCases { + if tc.expecting != validateTags(tc.tag) { + t.Fatalf("Expected %t, but testcase %s returned %t", tc.expecting, tc.tag, !tc.expecting) + } + } +} + +func newMetricDefinition(name string, tags []string) *MetricDefinition { + sort.Strings(tags) + + return &MetricDefinition{Name: name, Tags: tags} +} + +func TestNameWithTags(t *testing.T) { + type testCase struct { + expectedName string + expectedNameWithTags string + expectedTags []string + md MetricDefinition + } + + testCases := []testCase{ + { + "a.b.c", + "a.b.c;tag1=value1", + []string{"tag1=value1"}, + *newMetricDefinition("a.b.c", []string{"tag1=value1", "name=ccc"}), + }, { + "a.b.c", + "a.b.c;a=a;b=b;c=c", + []string{"a=a", "b=b", "c=c"}, + *newMetricDefinition("a.b.c", []string{"name=a.b.c", "c=c", "b=b", "a=a"}), + }, { + "a.b.c", + "a.b.c", + []string{}, + *newMetricDefinition("a.b.c", []string{"name=a.b.c"}), + }, { + "a.b.c", + "a.b.c", + []string{}, + *newMetricDefinition("a.b.c", []string{}), + }, { + "c", + "c;a=a;b=b;c=c", + []string{"a=a", "b=b", "c=c"}, + *newMetricDefinition("c", []string{"c=c", "a=a", "b=b"}), + }, + } + + for _, tc := range testCases { + tc.md.SetId() + if tc.expectedName != tc.md.Name { + t.Fatalf("Expected name %s, but got %s", tc.expectedName, tc.md.Name) + } + + if tc.expectedNameWithTags != tc.md.NameWithTags() { + t.Fatalf("Expected name with tags %s, but got %s", tc.expectedNameWithTags, tc.md.NameWithTags()) + } + + if len(tc.expectedTags) != len(tc.md.Tags) { + t.Fatalf("Expected tags %+v, but got %+v", tc.expectedTags, tc.md.Tags) + } + + for i := range tc.expectedTags { + if len(tc.expectedTags[i]) != len(tc.md.Tags[i]) { + t.Fatalf("Expected tags %+v, but got %+v", tc.expectedTags, tc.md.Tags) + } + } + + getAddress := func(s string) uint { + return uint((*reflect.StringHeader)(unsafe.Pointer(&s)).Data) + } + + nameWithTagsAddr := getAddress(tc.md.NameWithTags()) + nameAddr := getAddress(tc.md.Name) + if nameAddr != nameWithTagsAddr { + t.Fatalf("Name slice does not appear to be slice of base string, %d != %d", nameAddr, nameWithTagsAddr) + } + + for i
:= range tc.md.Tags { + tagAddr := getAddress(tc.md.Tags[i]) + + if tagAddr < nameWithTagsAddr || tagAddr >= nameWithTagsAddr+uint(len(tc.md.NameWithTags())) { + t.Fatalf("Tag slice does not appear to be slice of base string, %d != %d", tagAddr, nameWithTagsAddr) + } + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v1/msg/format.go b/vendor/gopkg.in/raintank/schema.v1/msg/format.go index 59803fdcc9..04cd71b696 100644 --- a/vendor/gopkg.in/raintank/schema.v1/msg/format.go +++ b/vendor/gopkg.in/raintank/schema.v1/msg/format.go @@ -9,8 +9,3 @@ const ( FormatMetricDataArrayJson Format = iota FormatMetricDataArrayMsgp ) - -const ( - FormatProbeEventJson Format = iota - FormatProbeEventMsgp -) diff --git a/vendor/gopkg.in/raintank/schema.v1/msg/msg.go b/vendor/gopkg.in/raintank/schema.v1/msg/msg.go index f6af779cf8..26a5d61a15 100644 --- a/vendor/gopkg.in/raintank/schema.v1/msg/msg.go +++ b/vendor/gopkg.in/raintank/schema.v1/msg/msg.go @@ -6,10 +6,8 @@ import ( "encoding/json" "errors" "fmt" - "strings" "time" - "github.com/codeskyblue/go-uuid" "gopkg.in/raintank/schema.v1" ) @@ -25,25 +23,6 @@ type MetricData struct { Msg []byte } -type ProbeEvent struct { - Id int64 - Produced time.Time - Event *schema.ProbeEvent - Format Format - Msg []byte -} - -type ProbeEventJson struct { - Id string `json:"id"` - EventType string `json:"event_type"` - OrgId int64 `json:"org_id"` - Severity string `json:"severity"` - Source string `json:"source"` - Timestamp int64 `json:"timestamp"` - Message string `json:"message"` - Tags []string `json:"tags"` -} - // parses format and id (cheap), but doesn't decode metrics (expensive) just yet. func (m *MetricData) InitFromMsg(msg []byte) error { if len(msg) < 9 { @@ -112,93 +91,3 @@ func CreateMsg(metrics []*schema.MetricData, id int64, version Format) ([]byte, } return buf.Bytes(), nil } - -func ProbeEventFromMsg(msg []byte) (*ProbeEvent, error) { - e := &ProbeEvent{ - Event: &schema.ProbeEvent{}, - Msg: msg, - } - if len(msg) < 9 { - return e, errTooSmall - } - - buf := bytes.NewReader(msg[1:9]) - binary.Read(buf, binary.BigEndian, &e.Id) - e.Produced = time.Unix(0, e.Id) - - format := Format(msg[0]) - if format != FormatProbeEventJson && format != FormatProbeEventMsgp { - return e, fmt.Errorf(errFmtUnknownFormat, format) - } - e.Format = format - return e, nil -} - -func (e *ProbeEvent) DecodeProbeEvent() error { - var err error - switch e.Format { - case FormatProbeEventJson: - oldFormat := &ProbeEventJson{} - err = json.Unmarshal(e.Msg[9:], oldFormat) - //convert our []string of key:valy pairs to - // map[string]string - tags := make(map[string]string) - for _, t := range oldFormat.Tags { - parts := strings.SplitN(t, ":", 2) - tags[parts[0]] = parts[1] - } - e.Event = &schema.ProbeEvent{ - Id: oldFormat.Id, - EventType: oldFormat.EventType, - OrgId: oldFormat.OrgId, - Severity: oldFormat.Severity, - Source: oldFormat.Source, - Timestamp: oldFormat.Timestamp, - Message: oldFormat.Message, - Tags: tags, - } - case FormatProbeEventMsgp: - _, err = e.Event.UnmarshalMsg(e.Msg[9:]) - default: - return fmt.Errorf(errFmtUnknownFormat, e.Msg[0]) - } - if err != nil { - return fmt.Errorf("ERROR: failure to unmarshal message body via format %q: %s", e.Format, err) - } - return nil -} - -func CreateProbeEventMsg(event *schema.ProbeEvent, id int64, version Format) ([]byte, error) { - if event.Id == "" { - // per http://blog.mikemccandless.com/2014/05/choosing-fast-unique-identifier-uuid.html, - // using V1 UUIDs is much faster than v4 like we were using - u := 
uuid.NewUUID() - event.Id = u.String() - } - buf := new(bytes.Buffer) - err := binary.Write(buf, binary.LittleEndian, uint8(version)) - if err != nil { - return nil, fmt.Errorf(errFmtBinWriteFailed, err) - } - err = binary.Write(buf, binary.BigEndian, id) - if err != nil { - return nil, fmt.Errorf(errFmtBinWriteFailed, err) - } - var msg []byte - switch version { - case FormatProbeEventJson: - msg, err = json.Marshal(event) - case FormatProbeEventMsgp: - msg, err = event.MarshalMsg(nil) - default: - return nil, fmt.Errorf(errFmtUnknownFormat, version) - } - if err != nil { - return nil, fmt.Errorf("Failed to marshal metrics payload: %s", err) - } - _, err = buf.Write(msg) - if err != nil { - return nil, fmt.Errorf(errFmtBinWriteFailed, err) - } - return buf.Bytes(), nil -} diff --git a/vendor/gopkg.in/raintank/schema.v1/point_gen_test.go b/vendor/gopkg.in/raintank/schema.v1/point_gen_test.go new file mode 100644 index 0000000000..33faf8f0a3 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v1/point_gen_test.go @@ -0,0 +1,125 @@ +package schema + +// NOTE: THIS FILE WAS PRODUCED BY THE +// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) +// DO NOT EDIT + +import ( + "bytes" + "testing" + + "github.com/tinylib/msgp/msgp" +) + +func TestMarshalUnmarshalPoint(t *testing.T) { + v := Point{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgPoint(b *testing.B) { + v := Point{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgPoint(b *testing.B) { + v := Point{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalPoint(b *testing.B) { + v := Point{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestEncodeDecodePoint(t *testing.T) { + v := Point{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + + m := v.Msgsize() + if buf.Len() > m { + t.Logf("WARNING: Msgsize() for %v is inaccurate", v) + } + + vn := Point{} + err := msgp.Decode(&buf, &vn) + if err != nil { + t.Error(err) + } + + buf.Reset() + msgp.Encode(&buf, &v) + err = msgp.NewReader(&buf).Skip() + if err != nil { + t.Error(err) + } +} + +func BenchmarkEncodePoint(b *testing.B) { + v := Point{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + en := msgp.NewWriter(msgp.Nowhere) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.EncodeMsg(en) + } + en.Flush() +} + +func BenchmarkDecodePoint(b *testing.B) { + v := Point{} + var buf bytes.Buffer + msgp.Encode(&buf, &v) + b.SetBytes(int64(buf.Len())) + rd := msgp.NewEndlessReader(buf.Bytes(), b) + dc := msgp.NewReader(rd) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + err := v.DecodeMsg(dc) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/gopkg.in/raintank/schema.v1/reslice_test.go 
b/vendor/gopkg.in/raintank/schema.v1/reslice_test.go new file mode 100644 index 0000000000..87ffad8bd7 --- /dev/null +++ b/vendor/gopkg.in/raintank/schema.v1/reslice_test.go @@ -0,0 +1,66 @@ +package schema + +import ( + "testing" +) + +type testCase struct { + inSize int + subSize int +} + +func TestReslice(t *testing.T) { + cases := []testCase{ + {10, 1}, + {10, 2}, + {10, 3}, + {10, 4}, + {10, 5}, + {10, 6}, + {10, 7}, + {10, 8}, + {10, 9}, + {10, 10}, + {10, 11}, + {100, 1}, + {100, 13}, + {100, 39}, + {100, 74}, + {100, 143}, + {100, 5000}, + } + for _, c := range cases { + in := make([]*MetricData, c.inSize) + for i := 0; i < c.inSize; i++ { + in[i] = &MetricData{OrgId: i} + } + out := Reslice(in, c.subSize) + expectedLen := len(in) / c.subSize + fullSubSlices := len(in) / c.subSize + if len(in)%c.subSize != 0 { + expectedLen += 1 + } + if len(out) != expectedLen { + t.Fatalf("case %#v: out array len expected %d, got %d", c, expectedLen, len(out)) + } + for i := 0; i < fullSubSlices; i++ { + if len(out[i]) != c.subSize { + t.Fatalf("out sub array %d len expected %d, got %d", i, c.subSize, len(out[i])) + } + } + lastSize := len(in) % c.subSize + if lastSize == 0 { + lastSize = c.subSize + } + if len(out[len(out)-1]) != lastSize { + t.Fatalf("out last sub array len expected %d, got %d", lastSize, len(out[len(out)-1])) + } + for i := 0; i < len(in); i++ { + subArray := i / c.subSize + subI := i % c.subSize + if in[i] != out[subArray][subI] { + t.Fatalf("element mismatch. in: %v, out: %v", in[i], out[subArray][subI]) + } + } + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json deleted file mode 100644 index 40e8e42dbf..0000000000 --- a/vendor/vendor.json +++ /dev/null @@ -1,703 +0,0 @@ -{ - "comment": "", - "ignore": "test", - "package": [ - { - "checksumSHA1": "vPAHn6NQfXwZmzal7YBN93Fnd6I=", - "path": "github.com/Dieterbe/artisanalhistogram/hist12h", - "revision": "91cfed5bb965b5797e2e452023c431f017599dba", - "revisionTime": "2016-11-27T15:30:35Z" - }, - { - "checksumSHA1": "3f8ivK6IiacIw+mE3uOqoyJu/Mg=", - "path": "github.com/Dieterbe/artisanalhistogram/hist15s", - "revision": "91cfed5bb965b5797e2e452023c431f017599dba", - "revisionTime": "2016-11-27T15:30:35Z" - }, - { - "checksumSHA1": "P8h2SWEK3NsJleSPe+mVNNpLG6Y=", - "path": "github.com/Dieterbe/profiletrigger/heap", - "revision": "d90c4b0cfeed756381675e85cc6e6b8a02cb01a6", - "revisionTime": "2016-10-07T15:24:48Z" - }, - { - "checksumSHA1": "uqfutMkGN018AvjoQET06lhOM3g=", - "path": "github.com/Shopify/sarama", - "revision": "bd61cae2be85fa6ff40eb23dcdd24567967ac2ae", - "revisionTime": "2016-08-30T13:25:53Z" - }, - { - "checksumSHA1": "USQFwXWz6tO69wtZdj3zZDB1MA4=", - "path": "github.com/Sirupsen/logrus", - "revision": "55eb11d21d2a31a3cc93838241d04800f52e823d", - "revisionTime": "2015-04-09T23:08:25Z" - }, - { - "checksumSHA1": "ly9VLPE9GKo2U7mnbZyjb2LDQ3w=", - "path": "github.com/Unknwon/com", - "revision": "28b053d5a2923b87ce8c5a08f3af779894a72758", - "revisionTime": "2015-10-08T13:54:07Z" - }, - { - "checksumSHA1": "acGG5NV9NBtjspVlWZ86Zzg8pW8=", - "path": "github.com/alyu/configparser", - "revision": "26b2fe18bee125de2a3090d6fadb7e280e63eba6", - "revisionTime": "2015-11-25T02:12:32Z" - }, - { - "checksumSHA1": "IElOf5GADxpb4XyVnR++sPWq504=", - "path": "github.com/araddon/gou", - "revision": "cf9cf25f52be174c5878920a8021bd224cbe32c7", - "revisionTime": "2015-04-25T18:03:51Z" - }, - { - "checksumSHA1": "x2JXwE0SXPAnBRJTcP/z920hfLY=", - "path": "github.com/armon/go-metrics", - "revision": 
"06b60999766278efd6d2b5d8418a58c3d5b99e87", - "revisionTime": "2015-12-07T02:54:52Z" - }, - { - "checksumSHA1": "Wqg57i6dQo9ZvTAifQscSij80dE=", - "path": "github.com/bitly/go-hostpool", - "revision": "d0e59c22a56e8dadfed24f74f452cea5a52722d2", - "revisionTime": "2015-03-31T13:04:00Z" - }, - { - "checksumSHA1": "nznoOqbwnj0kFNiIakBWCRQaaHA=", - "path": "github.com/bsm/sarama-cluster", - "revision": "11887f57ba85b075757463e9a4ffcfb0851ddff3", - "revisionTime": "2016-12-06T10:26:25Z" - }, - { - "checksumSHA1": "n6WHmaVaxufQthaT1tYnqTmtSsY=", - "path": "github.com/codeskyblue/go-uuid", - "revision": "952abbca900b023c1a80bc522ff4795db50d9d6c", - "revisionTime": "2014-02-08T11:57:53Z" - }, - { - "checksumSHA1": "dvabztWVQX8f6oMLRyv4dLH+TGY=", - "path": "github.com/davecgh/go-spew/spew", - "revision": "346938d642f2ec3594ed81d874461961cd0faa76", - "revisionTime": "2016-10-29T20:57:26Z" - }, - { - "checksumSHA1": "vwGGesK6k8uQgnwew609k9Jvcb8=", - "path": "github.com/dgryski/go-bits", - "revision": "2c7641e7dfe3945a0fe755f58c85ab306624956d", - "revisionTime": "2015-09-21T07:33:52Z" - }, - { - "checksumSHA1": "e4U50WUz2ycziirb3+JAVz3t49w=", - "path": "github.com/dgryski/go-linlog", - "revision": "f18bb8a4e7bcd60fd4fb99f3e8752f5da20f70a2", - "revisionTime": "2016-05-05T06:20:16Z" - }, - { - "checksumSHA1": "bJkM/x02zeuwA6avbGn2yvqjp20=", - "path": "github.com/dgryski/go-tsz", - "revision": "5f4c484a9e838989c201ff437b3cab59cfdc46c8", - "revisionTime": "2016-03-17T11:56:01Z" - }, - { - "checksumSHA1": "dn4hgSC9vkkMREpbcoMihPGCA88=", - "path": "github.com/dgryski/go-tsz/testdata", - "revision": "5f4c484a9e838989c201ff437b3cab59cfdc46c8", - "revisionTime": "2016-03-17T11:56:01Z" - }, - { - "checksumSHA1": "HoN/78ovv3/DC+kDKF7IENEc40g=", - "path": "github.com/docker/docker/api/types", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "jVJDbe0IcyjoKc2xbohwzQr+FF0=", - "path": "github.com/docker/docker/api/types/blkiodev", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "uhgObLWZ3XZE8mdf6ovciqBgljQ=", - "path": "github.com/docker/docker/api/types/container", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "XDP7i6sMYGnUKeFzgt+mFBJwjjw=", - "path": "github.com/docker/docker/api/types/events", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "S4SWOa0XduRd8ene8Alwih2Nwcw=", - "path": "github.com/docker/docker/api/types/filters", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "yeB781yxPhnN6OXQ9/qSsyih3ek=", - "path": "github.com/docker/docker/api/types/image", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "uJeLBKpHZXP+bWhXP4HhpyUTWYI=", - "path": "github.com/docker/docker/api/types/mount", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "Gskp+nvbVe8Gk1xPLHylZvNmqTg=", - "path": "github.com/docker/docker/api/types/network", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "r2vWq7Uc3ExKzMqYgH0b4AKjLKY=", - "path": "github.com/docker/docker/api/types/registry", - "revision": 
"edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "VTxWyFud/RedrpllGdQonVtGM/A=", - "path": "github.com/docker/docker/api/types/strslice", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "Q0U3queMsCw+rPPztXnRHwAxQEc=", - "path": "github.com/docker/docker/api/types/swarm", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "mi8EDCDjtrZEONRXPG7VHJosDwY=", - "path": "github.com/docker/docker/api/types/swarm/runtime", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "77axKFOjRx1nGrzIggGXfTxUYVQ=", - "path": "github.com/docker/docker/api/types/time", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "uDPQ3nHsrvGQc9tg/J9OSC4N5dQ=", - "path": "github.com/docker/docker/api/types/versions", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "IBJy2zPEnYmcFJ3lM1eiRWnCxTA=", - "path": "github.com/docker/docker/api/types/volume", - "revision": "edc204b1ffd53252649917fe54daa0b8419ed4ec", - "revisionTime": "2017-11-17T23:48:26Z" - }, - { - "checksumSHA1": "y2Kh4iPlgCPXSGTCcFpzePYdzzg=", - "path": "github.com/eapache/go-resiliency/breaker", - "revision": "b86b1ec0dd4209a588dc1285cdd471e73525c0b3", - "revisionTime": "2016-01-04T19:15:39Z" - }, - { - "checksumSHA1": "WHl96RVZlOOdF4Lb1OOadMpw8ls=", - "path": "github.com/eapache/go-xerial-snappy", - "revision": "bb955e01b9346ac19dc29eb16586c90ded99a98c", - "revisionTime": "2016-06-09T14:24:08Z" - }, - { - "checksumSHA1": "AEGF/lRMFJukJAnRqdCVQ/0I+BY=", - "path": "github.com/eapache/queue", - "revision": "ded5959c0d4e360646dc9e9908cff48666781367", - "revisionTime": "2015-06-06T11:53:03Z" - }, - { - "checksumSHA1": "OkqfwXeTVoiIxNMDA7HKvmrCDw8=", - "path": "github.com/go-macaron/binding", - "revision": "a453235199f8898e01647db8820f937a184fbd09", - "revisionTime": "2016-11-15T07:09:50Z" - }, - { - "checksumSHA1": "y0olVbiMQ6/UOa/eh52XYnies90=", - "path": "github.com/go-macaron/inject", - "revision": "d8a0b8677191f4380287cfebd08e462217bac7ad", - "revisionTime": "2016-06-27T17:00:12Z" - }, - { - "checksumSHA1": "9J/H5Vie1fPxR1JTL00If9G6AQ4=", - "path": "github.com/gocql/gocql", - "revision": "066e974c166d59aa2d3aee45b234d8c21c631180", - "revisionTime": "2017-08-31T15:20:34Z" - }, - { - "checksumSHA1": "Z3N6HDGWcvcNu0FloZRq54uO3h4=", - "path": "github.com/gocql/gocql/internal/lru", - "revision": "066e974c166d59aa2d3aee45b234d8c21c631180", - "revisionTime": "2017-08-31T15:20:34Z" - }, - { - "checksumSHA1": "ctK9mwZKnt/8dHxx2Ef6nZTljZs=", - "path": "github.com/gocql/gocql/internal/murmur", - "revision": "066e974c166d59aa2d3aee45b234d8c21c631180", - "revisionTime": "2017-08-31T15:20:34Z" - }, - { - "checksumSHA1": "tZQDfMMTKrYMXqen0zjJWLtOf1A=", - "path": "github.com/gocql/gocql/internal/streams", - "revision": "066e974c166d59aa2d3aee45b234d8c21c631180", - "revisionTime": "2017-08-31T15:20:34Z" - }, - { - "checksumSHA1": "OzddgAh0uv3ukKhWW7QFUEFbxow=", - "path": "github.com/golang/snappy", - "revision": "553a641470496b2327abcac10b36396bd98e45c9", - "revisionTime": "2017-02-15T23:32:05Z" - }, - { - "checksumSHA1": "heMa8aseS2k6NeJYumwo/xeKToY=", - "path": "github.com/gopherjs/gopherjs/js", - "revision": 
"4b53e1bddba0e2f734514aeb6c02db652f4c6fe8", - "revisionTime": "2016-01-29T23:51:36Z" - }, - { - "checksumSHA1": "wg1YpUMVhnUaoz26QhOWEhIT1dE=", - "path": "github.com/hailocab/go-hostpool", - "revision": "e80d13ce29ede4452c43dea11e79b9bc8a15b478", - "revisionTime": "2016-01-25T11:53:50Z" - }, - { - "checksumSHA1": "cdOCt0Yb+hdErz8NAQqayxPmRsY=", - "path": "github.com/hashicorp/errwrap", - "revision": "7554cd9344cec97297fa6649b055a8c98c2a1e55", - "revisionTime": "2014-10-28T05:47:10Z" - }, - { - "checksumSHA1": "TNlVzNR1OaajcNi3CbQ3bGbaLGU=", - "path": "github.com/hashicorp/go-msgpack/codec", - "revision": "fa3f63826f7c23912c15263591e65d54d080b458", - "revisionTime": "2015-05-18T23:42:57Z" - }, - { - "checksumSHA1": "lrSl49G23l6NhfilxPM0XFs5rZo=", - "path": "github.com/hashicorp/go-multierror", - "revision": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5", - "revisionTime": "2015-09-16T20:57:42Z" - }, - { - "checksumSHA1": "eCWvhgknHMj5K19ePPjIA3l401Q=", - "path": "github.com/hashicorp/go-sockaddr", - "revision": "9b4c5fa5b10a683339a270d664474b9f4aee62fc", - "revisionTime": "2017-10-30T10:43:12Z" - }, - { - "checksumSHA1": "88DoUaWD6hS1KTt57RMQ7wxHu/k=", - "path": "github.com/hashicorp/memberlist", - "revision": "9bdd37bfb26bd039c08b0f36be6f80ceede4aaf3", - "revisionTime": "2017-11-17T04:34:18Z" - }, - { - "checksumSHA1": "oIkoHb8+rM5Etur5HhZVY/sDQKQ=", - "path": "github.com/jpillora/backoff", - "revision": "06c7a16c845dc8e0bf575fafeeca0f5462f5eb4d", - "revisionTime": "2017-02-22T00:19:28Z" - }, - { - "checksumSHA1": "tewA7jXVGCw1zb5mA0BDecWi4iQ=", - "path": "github.com/jtolds/gls", - "revision": "8ddce2a84170772b95dd5d576c48d517b22cac63", - "revisionTime": "2016-01-05T22:08:40Z" - }, - { - "checksumSHA1": "6s2IAJ1smhtl7YePdwZZ1J2zqeA=", - "path": "github.com/kisielk/og-rek", - "revision": "ec792bc6e6aa06a6c490e8d292e15cca173c8bd3", - "revisionTime": "2017-04-05T22:37:46Z" - }, - { - "checksumSHA1": "o7abpsEIXBLz5n/khgI2QPRqSQA=", - "path": "github.com/kisielk/whisper-go/whisper", - "revision": "82e8091afdea241119c34a452fe24fcc2a0b962e", - "revisionTime": "2014-01-12T13:57:52Z" - }, - { - "checksumSHA1": "+CqJGh7NIDMnHgScq9sl9tPrnVM=", - "path": "github.com/klauspost/compress/flate", - "revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6", - "revisionTime": "2017-02-18T08:16:04Z" - }, - { - "checksumSHA1": "V1lQwkoDR1fPmZBSgkmZjgZofeU=", - "path": "github.com/klauspost/compress/gzip", - "revision": "14c9a76e3c95e47f8ccce949bba2c1101a8b85e6", - "revisionTime": "2017-02-18T08:16:04Z" - }, - { - "checksumSHA1": "iKPMvbAueGfdyHcWCgzwKzm8WVo=", - "path": "github.com/klauspost/cpuid", - "revision": "09cded8978dc9e80714c4d85b0322337b0a1e5e0", - "revisionTime": "2016-03-02T07:53:16Z" - }, - { - "checksumSHA1": "7ttJJBMDGKL63tX23fNmW7r7NvQ=", - "path": "github.com/klauspost/crc32", - "revision": "6834731faf32e62a2dd809d99fb24d1e4ae5a92d", - "revisionTime": "2016-01-12T14:50:11Z" - }, - { - "checksumSHA1": "03SwtyHKqhFk9IT7ZboF4I1S5Bw=", - "path": "github.com/mattbaird/elastigo/lib", - "revision": "34c4c4d8425cbdcbc8e257943a2044d5e9f7dab5", - "revisionTime": "2016-05-02T23:32:43Z" - }, - { - "checksumSHA1": "j4CCNdvlzHLOMA7gyS7wzIfwBTw=", - "path": "github.com/metrics20/go-metrics20/carbon20", - "revision": "55e770486f161cc83baf4e7178f7bb928dbc5716", - "revisionTime": "2017-08-11T09:14:05Z" - }, - { - "checksumSHA1": "WKGTGMb40ppaVUJa6AXc+/6ghUk=", - "path": "github.com/miekg/dns", - "revision": "48c8acaf0c2dc19fbb4f1b2776c1cee4e6f65aa0", - "revisionTime": "2016-06-08T16:10:03Z" - }, - { - "checksumSHA1": 
"IwuaGWkq9sh9hzmmgkyHvCEMGzM=", - "path": "github.com/mreiferson/go-snappystream", - "revision": "028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504", - "revisionTime": "2015-04-16T23:44:20Z" - }, - { - "checksumSHA1": "5mGTJWVBk+2KdbBQoJWMtGoJJgU=", - "path": "github.com/mreiferson/go-snappystream/snappy-go", - "revision": "028eae7ab5c4c9e2d1cb4c4ca1e53259bbe7e504", - "revisionTime": "2015-04-16T23:44:20Z" - }, - { - "checksumSHA1": "HXnVlKQKXv0Ss+c9+FUFXfXQvuA=", - "path": "github.com/nsqio/go-nsq", - "revision": "642a3f9935f12cb3b747294318d730f56f4c34b4", - "revisionTime": "2016-04-15T21:07:02Z" - }, - { - "checksumSHA1": "hpXnjCM/a0edhWUb2+and0mnPyY=", - "path": "github.com/opentracing/opentracing-go", - "revision": "8ebe5d4e236eed9fd88e593c288bfb804d630b8c", - "revisionTime": "2017-08-06T19:21:16Z" - }, - { - "checksumSHA1": "uhDxBvLEqRAMZKgpTZ8MFuLIIM8=", - "path": "github.com/opentracing/opentracing-go/ext", - "revision": "8ebe5d4e236eed9fd88e593c288bfb804d630b8c", - "revisionTime": "2017-08-06T19:21:16Z" - }, - { - "checksumSHA1": "tnkdNJbJxNKuPZMWapP1xhKIIGw=", - "path": "github.com/opentracing/opentracing-go/log", - "revision": "8ebe5d4e236eed9fd88e593c288bfb804d630b8c", - "revisionTime": "2017-08-06T19:21:16Z" - }, - { - "checksumSHA1": "3/7ZZ8ni2H6p5EDQ/Ij7Aw/VLZA=", - "path": "github.com/philhofer/fwd", - "revision": "92647f2bd94a89b170c19e96e6456dd64ac37e1a", - "revisionTime": "2015-11-20T02:40:02Z" - }, - { - "checksumSHA1": "z0EFMfc2jG9gWobdTvDtqblBHuk=", - "path": "github.com/raintank/dur", - "revision": "6ce9ec78e3a2ee918588104869369e5527aefe88", - "revisionTime": "2017-07-08T19:17:46Z" - }, - { - "checksumSHA1": "BmaVwqQ2CN9byGuy9RUl482cIMM=", - "path": "github.com/raintank/gziper", - "revision": "ad70bdb176fa32c61c29d21cd053bdfd9f0ec329", - "revisionTime": "2017-05-23T16:27:42Z" - }, - { - "checksumSHA1": "G5q2mryb77aa5RqtRSIiIXZpTcA=", - "path": "github.com/raintank/misc/app", - "revision": "cb73203311ae2489b845469f622678f24ff98a16", - "revisionTime": "2016-07-13T17:23:02Z" - }, - { - "checksumSHA1": "oCMADD0xqElYRIXBgNkBGHXGQyk=", - "path": "github.com/raintank/worldping-api/pkg/log", - "revision": "66b28f1160d1e0d7d236b5bd96a5d8eee627357c", - "revisionTime": "2016-07-14T10:38:05Z" - }, - { - "checksumSHA1": "kwZn/TNKc3VzadRTFiSkVRX+bTs=", - "path": "github.com/rakyll/globalconf", - "revision": "415abc325023f1a00cd2d9fa512e0e71745791a2", - "revisionTime": "2014-08-18T21:38:18Z" - }, - { - "checksumSHA1": "zFfrdRz7vZro+hrdAeiZ8ldXWQQ=", - "path": "github.com/rakyll/goini", - "revision": "907cca0f578a5316fb864ec6992dc3d9730ec58c", - "revisionTime": "2014-01-12T23:31:26Z" - }, - { - "checksumSHA1": "LjPdvMphElL0GOVNQCsmZMVgWIw=", - "path": "github.com/rs/cors", - "revision": "a62a804a8a009876ca59105f7899938a1349f4b3", - "revisionTime": "2016-06-17T23:19:35Z" - }, - { - "checksumSHA1": "hCRfPlNpqv8tvVivLzmXsoUOf1c=", - "path": "github.com/rs/xhandler", - "revision": "ed27b6fd65218132ee50cd95f38474a3d8a2cd12", - "revisionTime": "2016-06-18T19:32:21Z" - }, - { - "checksumSHA1": "tnMZLo/kR9Kqx6GtmWwowtTLlA8=", - "path": "github.com/sean-/seed", - "revision": "e2103e2c35297fb7e17febb81e49b312087a2372", - "revisionTime": "2017-03-13T16:33:22Z" - }, - { - "checksumSHA1": "v7C+aJ1D/z3MEeCte6bxvpoGjM4=", - "path": "github.com/sergi/go-diff/diffmatchpatch", - "revision": "feef008d51ad2b3778f85d387ccf91735543008d", - "revisionTime": "2017-04-09T07:17:39Z" - }, - { - "checksumSHA1": "r8yq1UeCPjRb4GO2NyHqEyFxb1w=", - "path": "github.com/smartystreets/assertions", - "revision": 
"443d812296a84445c202c085f19e18fc238f8250", - "revisionTime": "2016-02-01T21:43:16Z" - }, - { - "checksumSHA1": "9iA8MD7dsY0eid8vy+1ixJHkR+M=", - "path": "github.com/smartystreets/assertions/internal/go-render/render", - "revision": "443d812296a84445c202c085f19e18fc238f8250", - "revisionTime": "2016-02-01T21:43:16Z" - }, - { - "checksumSHA1": "QCsUvPHx/Ifqm+sJmocjSvePAIc=", - "path": "github.com/smartystreets/assertions/internal/oglematchers", - "revision": "443d812296a84445c202c085f19e18fc238f8250", - "revisionTime": "2016-02-01T21:43:16Z" - }, - { - "checksumSHA1": "fQeXVv5U9dlo3ufH2vjk1GNf4Lo=", - "path": "github.com/smartystreets/goconvey/convey", - "revision": "995f5b2e021c69b8b028ba6d0b05c1dd500783db", - "revisionTime": "2016-01-19T22:16:36Z" - }, - { - "checksumSHA1": "9LakndErFi5uCXtY1KWl0iRnT4c=", - "path": "github.com/smartystreets/goconvey/convey/gotest", - "revision": "995f5b2e021c69b8b028ba6d0b05c1dd500783db", - "revisionTime": "2016-01-19T22:16:36Z" - }, - { - "checksumSHA1": "abzHJ7H+qXomUFCgEAZ5QXO+S5g=", - "path": "github.com/smartystreets/goconvey/convey/reporting", - "revision": "995f5b2e021c69b8b028ba6d0b05c1dd500783db", - "revisionTime": "2016-01-19T22:16:36Z" - }, - { - "checksumSHA1": "QMPn4XX/OM3SYq3leNhEty32hmk=", - "path": "github.com/syndtr/goleveldb/leveldb", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "cWxfp4yXqeUmOjyOaPpGTqgEuBI=", - "path": "github.com/syndtr/goleveldb/leveldb/cache", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "5KPgnvCPlR0ysDAqo6jApzRQ3tw=", - "path": "github.com/syndtr/goleveldb/leveldb/comparer", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "Vpvz4qmbq/kz0SN95yt0tmSI7JE=", - "path": "github.com/syndtr/goleveldb/leveldb/errors", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "eqKeD6DS7eNCtxVYZEHHRKkyZrw=", - "path": "github.com/syndtr/goleveldb/leveldb/filter", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "cRn09EwfU3k2ZjvClHYmVFlakRY=", - "path": "github.com/syndtr/goleveldb/leveldb/iterator", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "CMBbso8ZuG2kBGDL2Blf/wpeheU=", - "path": "github.com/syndtr/goleveldb/leveldb/journal", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "LshzRv+3spfwuHLepRxiyjf/3sQ=", - "path": "github.com/syndtr/goleveldb/leveldb/memdb", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "MP/sSiEbzIN5M664sO4r9+dwzV4=", - "path": "github.com/syndtr/goleveldb/leveldb/opt", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "XO5e4bQsWDdNqoHbFWy2TKoOWrQ=", - "path": "github.com/syndtr/goleveldb/leveldb/storage", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "U070NRTkkUTUFgiuANYiuu81SIU=", - "path": "github.com/syndtr/goleveldb/leveldb/table", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - 
{ - "checksumSHA1": "4zil8Gwg8VPkDn1YzlgCvtukJFU=", - "path": "github.com/syndtr/goleveldb/leveldb/util", - "revision": "6ae1797c0b42b9323fc27ff7dcf568df88f2f33d", - "revisionTime": "2016-08-25T02:45:22Z" - }, - { - "checksumSHA1": "PcxMZX3aIoJbDC8ep9rdjTqdNwg=", - "path": "github.com/tinylib/msgp/msgp", - "revision": "0cea1fa86e8403be1284013014f87ab942056de8", - "revisionTime": "2015-10-23T22:38:53Z" - }, - { - "checksumSHA1": "5qKnuOcJoxq2UsMm8e8w9hOkNEc=", - "path": "github.com/uber/jaeger-client-go", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "Q3dweWgAV4ucIJiLgJwTSHnw1gw=", - "path": "github.com/uber/jaeger-client-go/config", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "XKa+O12fO64NK1H7XTE76W2K+HM=", - "path": "github.com/uber/jaeger-client-go/internal/baggage", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "E9tc/axv/gdW8LzWDQwG786K1VE=", - "path": "github.com/uber/jaeger-client-go/internal/baggage/remote", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "2YFNtVmzqktT363MIkePoTeS8lM=", - "path": "github.com/uber/jaeger-client-go/internal/spanlog", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "M6CT/tsjAdB3zjoVR2gPoiI4XOg=", - "path": "github.com/uber/jaeger-client-go/log", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "op9SpACI5fXM3UurkSpvk+3v+xk=", - "path": "github.com/uber/jaeger-client-go/rpcmetrics", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "x70FHhaTJkyGDNVd1XRjseXN/yg=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/agent", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "8GTKyCm4OZPKDcaLxt+qxy1Mk/w=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/baggage", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "TOIiprC8gAUlC/VVEpeMR6bIg8A=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/jaeger", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "bUNRwq+ZuDfL/Pk74LDnch+l+U8=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/sampling", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "XlbTGfqnmRg/rIOb82+KvcFsHjY=", - "path": "github.com/uber/jaeger-client-go/thrift-gen/zipkincore", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "UE75xCMkoreY735DXEHzdYudbz4=", - "path": "github.com/uber/jaeger-client-go/utils", - "revision": "377c5872ffb7b70ac7819205339019dbe9e9f4f2", - "revisionTime": "2017-08-07T21:05:17Z" - }, - { - "checksumSHA1": "9jjO5GjLa0XF/nfWihF02RoH4qc=", - "path": "golang.org/x/net/context", - "revision": "1aafd77e1e7f6849ad16a7bdeb65e3589a10b2bb", - "revisionTime": "2016-04-27T01:54:49Z" - }, - { - "checksumSHA1": "6f8MEU31llHM1sLM/GGH4/Qxu0A=", - "path": "gopkg.in/inf.v0", - "revision": 
"3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4", - "revisionTime": "2015-09-11T12:57:57Z" - }, - { - "checksumSHA1": "YRD335tkMvgHzkfbfveMUpsE3Bw=", - "path": "gopkg.in/ini.v1", - "revision": "6e4869b434bd001f6983749881c7ead3545887d8", - "revisionTime": "2016-08-27T06:11:18Z" - }, - { - "checksumSHA1": "wzuoHPA/Ccoe1CFnDL35rnL0JoI=", - "path": "gopkg.in/macaron.v1", - "revision": "4974334b10dbb6f5c0e17f4c10555ff050a16329", - "revisionTime": "2016-08-26T18:07:28Z" - }, - { - "checksumSHA1": "DohC0RpI4MSaPCG+Zhwvk/qCTYE=", - "path": "gopkg.in/raintank/schema.v0", - "revision": "b5eb018b887ddff47b9152f28f6f1513be9b2141", - "revisionTime": "2016-07-14T10:00:31Z" - }, - { - "checksumSHA1": "RcxQU1xxRElWcoMLYi4Cl6F5QXM=", - "path": "gopkg.in/raintank/schema.v1", - "revision": "68fc00d6506890a1cd6e79332697047bb03ee172", - "revisionTime": "2017-10-13T17:11:03Z" - }, - { - "checksumSHA1": "MjwLYwIckCtBwXhQMt5ji/L5lJA=", - "path": "gopkg.in/raintank/schema.v1/msg", - "revision": "a323316458b5df84827551e9b6b5f61cb2de423b", - "revisionTime": "2017-01-12T12:37:55Z" - } - ], - "rootPath": "github.com/grafana/metrictank" -}